Diffstat (limited to 'fs/gfs2')
-rw-r--r--  fs/gfs2/Kconfig                  |  47
-rw-r--r--  fs/gfs2/bmap.c                   |  10
-rw-r--r--  fs/gfs2/dir.c                    |  25
-rw-r--r--  fs/gfs2/dir.h                    |  21
-rw-r--r--  fs/gfs2/eattr.c                  |   8
-rw-r--r--  fs/gfs2/glock.c                  | 316
-rw-r--r--  fs/gfs2/glock.h                  |  11
-rw-r--r--  fs/gfs2/glops.c                  | 136
-rw-r--r--  fs/gfs2/incore.h                 |  18
-rw-r--r--  fs/gfs2/inode.c                  |  61
-rw-r--r--  fs/gfs2/lm.c                     |   8
-rw-r--r--  fs/gfs2/locking/dlm/lock_dlm.h   |   2
-rw-r--r--  fs/gfs2/locking/dlm/main.c       |   6
-rw-r--r--  fs/gfs2/locking/dlm/mount.c      |   6
-rw-r--r--  fs/gfs2/locking/dlm/plock.c      |   2
-rw-r--r--  fs/gfs2/locking/dlm/sysfs.c      |  13
-rw-r--r--  fs/gfs2/lops.c                   |  14
-rw-r--r--  fs/gfs2/meta_io.c                |   3
-rw-r--r--  fs/gfs2/ops_address.c            | 134
-rw-r--r--  fs/gfs2/ops_dentry.c             |  16
-rw-r--r--  fs/gfs2/ops_export.c             |  15
-rw-r--r--  fs/gfs2/ops_file.c               |  52
-rw-r--r--  fs/gfs2/ops_inode.c              |  63
-rw-r--r--  fs/gfs2/ops_inode.h              |   8
-rw-r--r--  fs/gfs2/ops_super.c              |  13
-rw-r--r--  fs/gfs2/ops_super.h              |   2
-rw-r--r--  fs/gfs2/ops_vm.c                 |  24
-rw-r--r--  fs/gfs2/super.c                  |  16
-rw-r--r--  fs/gfs2/sys.c                    |  10
29 files changed, 391 insertions, 669 deletions
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index 6a2ffa2db14f..de8e64c03f73 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -4,44 +4,43 @@ config GFS2_FS
4 select FS_POSIX_ACL 4 select FS_POSIX_ACL
5 select CRC32 5 select CRC32
6 help 6 help
7 A cluster filesystem. 7 A cluster filesystem.
8 8
9 Allows a cluster of computers to simultaneously use a block device 9 Allows a cluster of computers to simultaneously use a block device
10 that is shared between them (with FC, iSCSI, NBD, etc...). GFS reads 10 that is shared between them (with FC, iSCSI, NBD, etc...). GFS reads
11 and writes to the block device like a local filesystem, but also uses 11 and writes to the block device like a local filesystem, but also uses
12 a lock module to allow the computers coordinate their I/O so 12 a lock module to allow the computers coordinate their I/O so
13 filesystem consistency is maintained. One of the nifty features of 13 filesystem consistency is maintained. One of the nifty features of
14 GFS is perfect consistency -- changes made to the filesystem on one 14 GFS is perfect consistency -- changes made to the filesystem on one
15 machine show up immediately on all other machines in the cluster. 15 machine show up immediately on all other machines in the cluster.
16 16
17 To use the GFS2 filesystem, you will need to enable one or more of 17 To use the GFS2 filesystem, you will need to enable one or more of
18 the below locking modules. Documentation and utilities for GFS2 can 18 the below locking modules. Documentation and utilities for GFS2 can
19 be found here: http://sources.redhat.com/cluster 19 be found here: http://sources.redhat.com/cluster
20 20
21config GFS2_FS_LOCKING_NOLOCK 21config GFS2_FS_LOCKING_NOLOCK
22 tristate "GFS2 \"nolock\" locking module" 22 tristate "GFS2 \"nolock\" locking module"
23 depends on GFS2_FS 23 depends on GFS2_FS
24 help 24 help
25 Single node locking module for GFS2. 25 Single node locking module for GFS2.
26 26
27 Use this module if you want to use GFS2 on a single node without 27 Use this module if you want to use GFS2 on a single node without
28 its clustering features. You can still take advantage of the 28 its clustering features. You can still take advantage of the
29 large file support, and upgrade to running a full cluster later on 29 large file support, and upgrade to running a full cluster later on
30 if required. 30 if required.
31 31
32 If you will only be using GFS2 in cluster mode, you do not need this 32 If you will only be using GFS2 in cluster mode, you do not need this
33 module. 33 module.
34 34
35config GFS2_FS_LOCKING_DLM 35config GFS2_FS_LOCKING_DLM
36 tristate "GFS2 DLM locking module" 36 tristate "GFS2 DLM locking module"
37 depends on GFS2_FS && NET && INET && (IPV6 || IPV6=n) 37 depends on GFS2_FS && SYSFS && NET && INET && (IPV6 || IPV6=n)
38 select IP_SCTP if DLM_SCTP 38 select IP_SCTP if DLM_SCTP
39 select CONFIGFS_FS 39 select CONFIGFS_FS
40 select DLM 40 select DLM
41 help 41 help
42 Multiple node locking module for GFS2 42 Multiple node locking module for GFS2
43
44 Most users of GFS2 will require this module. It provides the locking
45 interface between GFS2 and the DLM, which is required to use GFS2
46 in a cluster environment.
47 43
44 Most users of GFS2 will require this module. It provides the locking
45 interface between GFS2 and the DLM, which is required to use GFS2
46 in a cluster environment.
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 8240c1ff94f4..113f6c9110c7 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -773,7 +773,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
773 gfs2_free_data(ip, bstart, blen); 773 gfs2_free_data(ip, bstart, blen);
774 } 774 }
775 775
776 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); 776 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME_SEC;
777 777
778 gfs2_dinode_out(ip, dibh->b_data); 778 gfs2_dinode_out(ip, dibh->b_data);
779 779
@@ -848,7 +848,7 @@ static int do_grow(struct gfs2_inode *ip, u64 size)
848 } 848 }
849 849
850 ip->i_di.di_size = size; 850 ip->i_di.di_size = size;
851 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); 851 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME_SEC;
852 852
853 error = gfs2_meta_inode_buffer(ip, &dibh); 853 error = gfs2_meta_inode_buffer(ip, &dibh);
854 if (error) 854 if (error)
@@ -963,7 +963,7 @@ static int trunc_start(struct gfs2_inode *ip, u64 size)
963 963
964 if (gfs2_is_stuffed(ip)) { 964 if (gfs2_is_stuffed(ip)) {
965 ip->i_di.di_size = size; 965 ip->i_di.di_size = size;
966 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); 966 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME_SEC;
967 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 967 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
968 gfs2_dinode_out(ip, dibh->b_data); 968 gfs2_dinode_out(ip, dibh->b_data);
969 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + size); 969 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + size);
@@ -975,7 +975,7 @@ static int trunc_start(struct gfs2_inode *ip, u64 size)
975 975
976 if (!error) { 976 if (!error) {
977 ip->i_di.di_size = size; 977 ip->i_di.di_size = size;
978 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); 978 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME_SEC;
979 ip->i_di.di_flags |= GFS2_DIF_TRUNC_IN_PROG; 979 ip->i_di.di_flags |= GFS2_DIF_TRUNC_IN_PROG;
980 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 980 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
981 gfs2_dinode_out(ip, dibh->b_data); 981 gfs2_dinode_out(ip, dibh->b_data);
@@ -1048,7 +1048,7 @@ static int trunc_end(struct gfs2_inode *ip)
1048 ip->i_num.no_addr; 1048 ip->i_num.no_addr;
1049 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode)); 1049 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
1050 } 1050 }
1051 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); 1051 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME_SEC;
1052 ip->i_di.di_flags &= ~GFS2_DIF_TRUNC_IN_PROG; 1052 ip->i_di.di_flags &= ~GFS2_DIF_TRUNC_IN_PROG;
1053 1053
1054 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 1054 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
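The timestamp hunks above, and the matching ones in dir.c, eattr.c and inode.c further down, all make the same one-line substitution. A condensed before/after sketch of that idiom, with names taken from the hunks (CURRENT_TIME_SEC is the in-kernel shorthand for the current wall-clock time at one-second resolution):

    /* Before: only the seconds field was written, leaving tv_nsec untouched. */
    ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();

    /* After: the whole struct timespec is assigned, so tv_nsec is reset to 0
     * and both timestamps stay internally consistent. */
    ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME_SEC;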
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 0fdcb7713cd9..c93ca8f361b5 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -131,7 +131,7 @@ static int gfs2_dir_write_stuffed(struct gfs2_inode *ip, const char *buf,
131 memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size); 131 memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size);
132 if (ip->i_di.di_size < offset + size) 132 if (ip->i_di.di_size < offset + size)
133 ip->i_di.di_size = offset + size; 133 ip->i_di.di_size = offset + size;
134 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); 134 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME_SEC;
135 gfs2_dinode_out(ip, dibh->b_data); 135 gfs2_dinode_out(ip, dibh->b_data);
136 136
137 brelse(dibh); 137 brelse(dibh);
@@ -229,7 +229,7 @@ out:
229 229
230 if (ip->i_di.di_size < offset + copied) 230 if (ip->i_di.di_size < offset + copied)
231 ip->i_di.di_size = offset + copied; 231 ip->i_di.di_size = offset + copied;
232 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); 232 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME_SEC;
233 233
234 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 234 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
235 gfs2_dinode_out(ip, dibh->b_data); 235 gfs2_dinode_out(ip, dibh->b_data);
@@ -1198,12 +1198,11 @@ static int compare_dents(const void *a, const void *b)
1198 */ 1198 */
1199 1199
1200static int do_filldir_main(struct gfs2_inode *dip, u64 *offset, 1200static int do_filldir_main(struct gfs2_inode *dip, u64 *offset,
1201 void *opaque, gfs2_filldir_t filldir, 1201 void *opaque, filldir_t filldir,
1202 const struct gfs2_dirent **darr, u32 entries, 1202 const struct gfs2_dirent **darr, u32 entries,
1203 int *copied) 1203 int *copied)
1204{ 1204{
1205 const struct gfs2_dirent *dent, *dent_next; 1205 const struct gfs2_dirent *dent, *dent_next;
1206 struct gfs2_inum_host inum;
1207 u64 off, off_next; 1206 u64 off, off_next;
1208 unsigned int x, y; 1207 unsigned int x, y;
1209 int run = 0; 1208 int run = 0;
@@ -1240,11 +1239,9 @@ static int do_filldir_main(struct gfs2_inode *dip, u64 *offset,
1240 *offset = off; 1239 *offset = off;
1241 } 1240 }
1242 1241
1243 gfs2_inum_in(&inum, (char *)&dent->de_inum);
1244
1245 error = filldir(opaque, (const char *)(dent + 1), 1242 error = filldir(opaque, (const char *)(dent + 1),
1246 be16_to_cpu(dent->de_name_len), 1243 be16_to_cpu(dent->de_name_len),
1247 off, &inum, 1244 off, be64_to_cpu(dent->de_inum.no_addr),
1248 be16_to_cpu(dent->de_type)); 1245 be16_to_cpu(dent->de_type));
1249 if (error) 1246 if (error)
1250 return 1; 1247 return 1;
@@ -1262,8 +1259,8 @@ static int do_filldir_main(struct gfs2_inode *dip, u64 *offset,
1262} 1259}
1263 1260
1264static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque, 1261static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
1265 gfs2_filldir_t filldir, int *copied, 1262 filldir_t filldir, int *copied, unsigned *depth,
1266 unsigned *depth, u64 leaf_no) 1263 u64 leaf_no)
1267{ 1264{
1268 struct gfs2_inode *ip = GFS2_I(inode); 1265 struct gfs2_inode *ip = GFS2_I(inode);
1269 struct buffer_head *bh; 1266 struct buffer_head *bh;
@@ -1343,7 +1340,7 @@ out:
1343 */ 1340 */
1344 1341
1345static int dir_e_read(struct inode *inode, u64 *offset, void *opaque, 1342static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
1346 gfs2_filldir_t filldir) 1343 filldir_t filldir)
1347{ 1344{
1348 struct gfs2_inode *dip = GFS2_I(inode); 1345 struct gfs2_inode *dip = GFS2_I(inode);
1349 struct gfs2_sbd *sdp = GFS2_SB(inode); 1346 struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -1402,7 +1399,7 @@ out:
1402} 1399}
1403 1400
1404int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque, 1401int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
1405 gfs2_filldir_t filldir) 1402 filldir_t filldir)
1406{ 1403{
1407 struct gfs2_inode *dip = GFS2_I(inode); 1404 struct gfs2_inode *dip = GFS2_I(inode);
1408 struct dirent_gather g; 1405 struct dirent_gather g;
@@ -1568,7 +1565,7 @@ int gfs2_dir_add(struct inode *inode, const struct qstr *name,
1568 break; 1565 break;
1569 gfs2_trans_add_bh(ip->i_gl, bh, 1); 1566 gfs2_trans_add_bh(ip->i_gl, bh, 1);
1570 ip->i_di.di_entries++; 1567 ip->i_di.di_entries++;
1571 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); 1568 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME_SEC;
1572 gfs2_dinode_out(ip, bh->b_data); 1569 gfs2_dinode_out(ip, bh->b_data);
1573 brelse(bh); 1570 brelse(bh);
1574 error = 0; 1571 error = 0;
@@ -1654,7 +1651,7 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *name)
1654 gfs2_consist_inode(dip); 1651 gfs2_consist_inode(dip);
1655 gfs2_trans_add_bh(dip->i_gl, bh, 1); 1652 gfs2_trans_add_bh(dip->i_gl, bh, 1);
1656 dip->i_di.di_entries--; 1653 dip->i_di.di_entries--;
1657 dip->i_inode.i_mtime.tv_sec = dip->i_inode.i_ctime.tv_sec = get_seconds(); 1654 dip->i_inode.i_mtime = dip->i_inode.i_ctime = CURRENT_TIME_SEC;
1658 gfs2_dinode_out(dip, bh->b_data); 1655 gfs2_dinode_out(dip, bh->b_data);
1659 brelse(bh); 1656 brelse(bh);
1660 mark_inode_dirty(&dip->i_inode); 1657 mark_inode_dirty(&dip->i_inode);
@@ -1702,7 +1699,7 @@ int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
1702 gfs2_trans_add_bh(dip->i_gl, bh, 1); 1699 gfs2_trans_add_bh(dip->i_gl, bh, 1);
1703 } 1700 }
1704 1701
1705 dip->i_inode.i_mtime.tv_sec = dip->i_inode.i_ctime.tv_sec = get_seconds(); 1702 dip->i_inode.i_mtime = dip->i_inode.i_ctime = CURRENT_TIME_SEC;
1706 gfs2_dinode_out(dip, bh->b_data); 1703 gfs2_dinode_out(dip, bh->b_data);
1707 brelse(bh); 1704 brelse(bh);
1708 return 0; 1705 return 0;
diff --git a/fs/gfs2/dir.h b/fs/gfs2/dir.h
index b21b33668a5b..48fe89046bba 100644
--- a/fs/gfs2/dir.h
+++ b/fs/gfs2/dir.h
@@ -16,30 +16,13 @@ struct inode;
16struct gfs2_inode; 16struct gfs2_inode;
17struct gfs2_inum; 17struct gfs2_inum;
18 18
19/**
20 * gfs2_filldir_t - Report a directory entry to the caller of gfs2_dir_read()
21 * @opaque: opaque data used by the function
22 * @name: the name of the directory entry
23 * @length: the length of the name
24 * @offset: the entry's offset in the directory
25 * @inum: the inode number the entry points to
26 * @type: the type of inode the entry points to
27 *
28 * Returns: 0 on success, 1 if buffer full
29 */
30
31typedef int (*gfs2_filldir_t) (void *opaque,
32 const char *name, unsigned int length,
33 u64 offset,
34 struct gfs2_inum_host *inum, unsigned int type);
35
36int gfs2_dir_search(struct inode *dir, const struct qstr *filename, 19int gfs2_dir_search(struct inode *dir, const struct qstr *filename,
37 struct gfs2_inum_host *inum, unsigned int *type); 20 struct gfs2_inum_host *inum, unsigned int *type);
38int gfs2_dir_add(struct inode *inode, const struct qstr *filename, 21int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
39 const struct gfs2_inum_host *inum, unsigned int type); 22 const struct gfs2_inum_host *inum, unsigned int type);
40int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *filename); 23int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *filename);
41int gfs2_dir_read(struct inode *inode, u64 * offset, void *opaque, 24int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
42 gfs2_filldir_t filldir); 25 filldir_t filldir);
43int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename, 26int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
44 struct gfs2_inum_host *new_inum, unsigned int new_type); 27 struct gfs2_inum_host *new_inum, unsigned int new_type);
45 28
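With the private gfs2_filldir_t typedef gone, gfs2_dir_read() and its helpers take the VFS readdir callback directly. For reference, the generic callback type in kernels of this vintage is declared in include/linux/fs.h roughly as follows (parameter names added here for readability):

    /* Generic directory-entry callback: the inode number is passed as a plain
     * u64, which is why do_filldir_main() above no longer unpacks a
     * gfs2_inum_host before calling it. */
    typedef int (*filldir_t)(void *buf, const char *name, int namelen,
                             loff_t offset, u64 ino, unsigned int d_type);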
diff --git a/fs/gfs2/eattr.c b/fs/gfs2/eattr.c
index ebebbdcd7057..0c83c7f4dda8 100644
--- a/fs/gfs2/eattr.c
+++ b/fs/gfs2/eattr.c
@@ -301,7 +301,7 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
301 301
302 error = gfs2_meta_inode_buffer(ip, &dibh); 302 error = gfs2_meta_inode_buffer(ip, &dibh);
303 if (!error) { 303 if (!error) {
304 ip->i_inode.i_ctime.tv_sec = get_seconds(); 304 ip->i_inode.i_ctime = CURRENT_TIME_SEC;
305 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 305 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
306 gfs2_dinode_out(ip, dibh->b_data); 306 gfs2_dinode_out(ip, dibh->b_data);
307 brelse(dibh); 307 brelse(dibh);
@@ -718,7 +718,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
718 (er->er_mode & S_IFMT)); 718 (er->er_mode & S_IFMT));
719 ip->i_inode.i_mode = er->er_mode; 719 ip->i_inode.i_mode = er->er_mode;
720 } 720 }
721 ip->i_inode.i_ctime.tv_sec = get_seconds(); 721 ip->i_inode.i_ctime = CURRENT_TIME_SEC;
722 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 722 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
723 gfs2_dinode_out(ip, dibh->b_data); 723 gfs2_dinode_out(ip, dibh->b_data);
724 brelse(dibh); 724 brelse(dibh);
@@ -853,7 +853,7 @@ static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
853 (ip->i_inode.i_mode & S_IFMT) == (er->er_mode & S_IFMT)); 853 (ip->i_inode.i_mode & S_IFMT) == (er->er_mode & S_IFMT));
854 ip->i_inode.i_mode = er->er_mode; 854 ip->i_inode.i_mode = er->er_mode;
855 } 855 }
856 ip->i_inode.i_ctime.tv_sec = get_seconds(); 856 ip->i_inode.i_ctime = CURRENT_TIME_SEC;
857 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 857 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
858 gfs2_dinode_out(ip, dibh->b_data); 858 gfs2_dinode_out(ip, dibh->b_data);
859 brelse(dibh); 859 brelse(dibh);
@@ -1134,7 +1134,7 @@ static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
1134 1134
1135 error = gfs2_meta_inode_buffer(ip, &dibh); 1135 error = gfs2_meta_inode_buffer(ip, &dibh);
1136 if (!error) { 1136 if (!error) {
1137 ip->i_inode.i_ctime.tv_sec = get_seconds(); 1137 ip->i_inode.i_ctime = CURRENT_TIME_SEC;
1138 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 1138 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1139 gfs2_dinode_out(ip, dibh->b_data); 1139 gfs2_dinode_out(ip, dibh->b_data);
1140 brelse(dibh); 1140 brelse(dibh);
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 438146904b58..6618c1190252 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -19,6 +19,8 @@
19#include <linux/gfs2_ondisk.h> 19#include <linux/gfs2_ondisk.h>
20#include <linux/list.h> 20#include <linux/list.h>
21#include <linux/lm_interface.h> 21#include <linux/lm_interface.h>
22#include <linux/wait.h>
23#include <linux/rwsem.h>
22#include <asm/uaccess.h> 24#include <asm/uaccess.h>
23 25
24#include "gfs2.h" 26#include "gfs2.h"
@@ -33,11 +35,6 @@
33#include "super.h" 35#include "super.h"
34#include "util.h" 36#include "util.h"
35 37
36struct greedy {
37 struct gfs2_holder gr_gh;
38 struct delayed_work gr_work;
39};
40
41struct gfs2_gl_hash_bucket { 38struct gfs2_gl_hash_bucket {
42 struct hlist_head hb_list; 39 struct hlist_head hb_list;
43}; 40};
@@ -47,6 +44,9 @@ typedef void (*glock_examiner) (struct gfs2_glock * gl);
47static int gfs2_dump_lockstate(struct gfs2_sbd *sdp); 44static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
48static int dump_glock(struct gfs2_glock *gl); 45static int dump_glock(struct gfs2_glock *gl);
49static int dump_inode(struct gfs2_inode *ip); 46static int dump_inode(struct gfs2_inode *ip);
47static void gfs2_glock_xmote_th(struct gfs2_holder *gh);
48static void gfs2_glock_drop_th(struct gfs2_glock *gl);
49static DECLARE_RWSEM(gfs2_umount_flush_sem);
50 50
51#define GFS2_GL_HASH_SHIFT 15 51#define GFS2_GL_HASH_SHIFT 15
52#define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT) 52#define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT)
@@ -213,30 +213,6 @@ out:
213} 213}
214 214
215/** 215/**
216 * queue_empty - check to see if a glock's queue is empty
217 * @gl: the glock
218 * @head: the head of the queue to check
219 *
220 * This function protects the list in the event that a process already
221 * has a holder on the list and is adding a second holder for itself.
222 * The glmutex lock is what generally prevents processes from working
223 * on the same glock at once, but the special case of adding a second
224 * holder for yourself ("recursive" locking) doesn't involve locking
225 * glmutex, making the spin lock necessary.
226 *
227 * Returns: 1 if the queue is empty
228 */
229
230static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
231{
232 int empty;
233 spin_lock(&gl->gl_spin);
234 empty = list_empty(head);
235 spin_unlock(&gl->gl_spin);
236 return empty;
237}
238
239/**
240 * search_bucket() - Find struct gfs2_glock by lock number 216 * search_bucket() - Find struct gfs2_glock by lock number
241 * @bucket: the bucket to search 217 * @bucket: the bucket to search
242 * @name: The lock name 218 * @name: The lock name
@@ -395,11 +371,6 @@ void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
395 gh->gh_flags = flags; 371 gh->gh_flags = flags;
396 gh->gh_error = 0; 372 gh->gh_error = 0;
397 gh->gh_iflags = 0; 373 gh->gh_iflags = 0;
398 init_completion(&gh->gh_wait);
399
400 if (gh->gh_state == LM_ST_EXCLUSIVE)
401 gh->gh_flags |= GL_LOCAL_EXCL;
402
403 gfs2_glock_hold(gl); 374 gfs2_glock_hold(gl);
404} 375}
405 376
@@ -417,9 +388,6 @@ void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *
417{ 388{
418 gh->gh_state = state; 389 gh->gh_state = state;
419 gh->gh_flags = flags; 390 gh->gh_flags = flags;
420 if (gh->gh_state == LM_ST_EXCLUSIVE)
421 gh->gh_flags |= GL_LOCAL_EXCL;
422
423 gh->gh_iflags &= 1 << HIF_ALLOCED; 391 gh->gh_iflags &= 1 << HIF_ALLOCED;
424 gh->gh_ip = (unsigned long)__builtin_return_address(0); 392 gh->gh_ip = (unsigned long)__builtin_return_address(0);
425} 393}
@@ -479,6 +447,29 @@ static void gfs2_holder_put(struct gfs2_holder *gh)
479 kfree(gh); 447 kfree(gh);
480} 448}
481 449
450static void gfs2_holder_dispose_or_wake(struct gfs2_holder *gh)
451{
452 if (test_bit(HIF_DEALLOC, &gh->gh_iflags)) {
453 gfs2_holder_put(gh);
454 return;
455 }
456 clear_bit(HIF_WAIT, &gh->gh_iflags);
457 smp_mb();
458 wake_up_bit(&gh->gh_iflags, HIF_WAIT);
459}
460
461static int holder_wait(void *word)
462{
463 schedule();
464 return 0;
465}
466
467static void wait_on_holder(struct gfs2_holder *gh)
468{
469 might_sleep();
470 wait_on_bit(&gh->gh_iflags, HIF_WAIT, holder_wait, TASK_UNINTERRUPTIBLE);
471}
472
482/** 473/**
483 * rq_mutex - process a mutex request in the queue 474 * rq_mutex - process a mutex request in the queue
484 * @gh: the glock holder 475 * @gh: the glock holder
@@ -493,7 +484,9 @@ static int rq_mutex(struct gfs2_holder *gh)
493 list_del_init(&gh->gh_list); 484 list_del_init(&gh->gh_list);
494 /* gh->gh_error never examined. */ 485 /* gh->gh_error never examined. */
495 set_bit(GLF_LOCK, &gl->gl_flags); 486 set_bit(GLF_LOCK, &gl->gl_flags);
496 complete(&gh->gh_wait); 487 clear_bit(HIF_WAIT, &gh->gh_iflags);
488 smp_mb();
489 wake_up_bit(&gh->gh_iflags, HIF_WAIT);
497 490
498 return 1; 491 return 1;
499} 492}
@@ -511,7 +504,6 @@ static int rq_promote(struct gfs2_holder *gh)
511{ 504{
512 struct gfs2_glock *gl = gh->gh_gl; 505 struct gfs2_glock *gl = gh->gh_gl;
513 struct gfs2_sbd *sdp = gl->gl_sbd; 506 struct gfs2_sbd *sdp = gl->gl_sbd;
514 const struct gfs2_glock_operations *glops = gl->gl_ops;
515 507
516 if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) { 508 if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
517 if (list_empty(&gl->gl_holders)) { 509 if (list_empty(&gl->gl_holders)) {
@@ -526,7 +518,7 @@ static int rq_promote(struct gfs2_holder *gh)
526 gfs2_reclaim_glock(sdp); 518 gfs2_reclaim_glock(sdp);
527 } 519 }
528 520
529 glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags); 521 gfs2_glock_xmote_th(gh);
530 spin_lock(&gl->gl_spin); 522 spin_lock(&gl->gl_spin);
531 } 523 }
532 return 1; 524 return 1;
@@ -537,11 +529,11 @@ static int rq_promote(struct gfs2_holder *gh)
537 set_bit(GLF_LOCK, &gl->gl_flags); 529 set_bit(GLF_LOCK, &gl->gl_flags);
538 } else { 530 } else {
539 struct gfs2_holder *next_gh; 531 struct gfs2_holder *next_gh;
540 if (gh->gh_flags & GL_LOCAL_EXCL) 532 if (gh->gh_state == LM_ST_EXCLUSIVE)
541 return 1; 533 return 1;
542 next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder, 534 next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
543 gh_list); 535 gh_list);
544 if (next_gh->gh_flags & GL_LOCAL_EXCL) 536 if (next_gh->gh_state == LM_ST_EXCLUSIVE)
545 return 1; 537 return 1;
546 } 538 }
547 539
@@ -549,7 +541,7 @@ static int rq_promote(struct gfs2_holder *gh)
549 gh->gh_error = 0; 541 gh->gh_error = 0;
550 set_bit(HIF_HOLDER, &gh->gh_iflags); 542 set_bit(HIF_HOLDER, &gh->gh_iflags);
551 543
552 complete(&gh->gh_wait); 544 gfs2_holder_dispose_or_wake(gh);
553 545
554 return 0; 546 return 0;
555} 547}
@@ -564,7 +556,6 @@ static int rq_promote(struct gfs2_holder *gh)
564static int rq_demote(struct gfs2_holder *gh) 556static int rq_demote(struct gfs2_holder *gh)
565{ 557{
566 struct gfs2_glock *gl = gh->gh_gl; 558 struct gfs2_glock *gl = gh->gh_gl;
567 const struct gfs2_glock_operations *glops = gl->gl_ops;
568 559
569 if (!list_empty(&gl->gl_holders)) 560 if (!list_empty(&gl->gl_holders))
570 return 1; 561 return 1;
@@ -573,10 +564,7 @@ static int rq_demote(struct gfs2_holder *gh)
573 list_del_init(&gh->gh_list); 564 list_del_init(&gh->gh_list);
574 gh->gh_error = 0; 565 gh->gh_error = 0;
575 spin_unlock(&gl->gl_spin); 566 spin_unlock(&gl->gl_spin);
576 if (test_bit(HIF_DEALLOC, &gh->gh_iflags)) 567 gfs2_holder_dispose_or_wake(gh);
577 gfs2_holder_put(gh);
578 else
579 complete(&gh->gh_wait);
580 spin_lock(&gl->gl_spin); 568 spin_lock(&gl->gl_spin);
581 } else { 569 } else {
582 gl->gl_req_gh = gh; 570 gl->gl_req_gh = gh;
@@ -585,9 +573,9 @@ static int rq_demote(struct gfs2_holder *gh)
585 573
586 if (gh->gh_state == LM_ST_UNLOCKED || 574 if (gh->gh_state == LM_ST_UNLOCKED ||
587 gl->gl_state != LM_ST_EXCLUSIVE) 575 gl->gl_state != LM_ST_EXCLUSIVE)
588 glops->go_drop_th(gl); 576 gfs2_glock_drop_th(gl);
589 else 577 else
590 glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags); 578 gfs2_glock_xmote_th(gh);
591 579
592 spin_lock(&gl->gl_spin); 580 spin_lock(&gl->gl_spin);
593 } 581 }
@@ -596,30 +584,6 @@ static int rq_demote(struct gfs2_holder *gh)
596} 584}
597 585
598/** 586/**
599 * rq_greedy - process a queued request to drop greedy status
600 * @gh: the glock holder
601 *
602 * Returns: 1 if the queue is blocked
603 */
604
605static int rq_greedy(struct gfs2_holder *gh)
606{
607 struct gfs2_glock *gl = gh->gh_gl;
608
609 list_del_init(&gh->gh_list);
610 /* gh->gh_error never examined. */
611 clear_bit(GLF_GREEDY, &gl->gl_flags);
612 spin_unlock(&gl->gl_spin);
613
614 gfs2_holder_uninit(gh);
615 kfree(container_of(gh, struct greedy, gr_gh));
616
617 spin_lock(&gl->gl_spin);
618
619 return 0;
620}
621
622/**
623 * run_queue - process holder structures on a glock 587 * run_queue - process holder structures on a glock
624 * @gl: the glock 588 * @gl: the glock
625 * 589 *
@@ -649,8 +613,6 @@ static void run_queue(struct gfs2_glock *gl)
649 613
650 if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) 614 if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
651 blocked = rq_demote(gh); 615 blocked = rq_demote(gh);
652 else if (test_bit(HIF_GREEDY, &gh->gh_iflags))
653 blocked = rq_greedy(gh);
654 else 616 else
655 gfs2_assert_warn(gl->gl_sbd, 0); 617 gfs2_assert_warn(gl->gl_sbd, 0);
656 618
@@ -684,6 +646,8 @@ static void gfs2_glmutex_lock(struct gfs2_glock *gl)
684 646
685 gfs2_holder_init(gl, 0, 0, &gh); 647 gfs2_holder_init(gl, 0, 0, &gh);
686 set_bit(HIF_MUTEX, &gh.gh_iflags); 648 set_bit(HIF_MUTEX, &gh.gh_iflags);
649 if (test_and_set_bit(HIF_WAIT, &gh.gh_iflags))
650 BUG();
687 651
688 spin_lock(&gl->gl_spin); 652 spin_lock(&gl->gl_spin);
689 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { 653 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
@@ -691,11 +655,13 @@ static void gfs2_glmutex_lock(struct gfs2_glock *gl)
691 } else { 655 } else {
692 gl->gl_owner = current; 656 gl->gl_owner = current;
693 gl->gl_ip = (unsigned long)__builtin_return_address(0); 657 gl->gl_ip = (unsigned long)__builtin_return_address(0);
694 complete(&gh.gh_wait); 658 clear_bit(HIF_WAIT, &gh.gh_iflags);
659 smp_mb();
660 wake_up_bit(&gh.gh_iflags, HIF_WAIT);
695 } 661 }
696 spin_unlock(&gl->gl_spin); 662 spin_unlock(&gl->gl_spin);
697 663
698 wait_for_completion(&gh.gh_wait); 664 wait_on_holder(&gh);
699 gfs2_holder_uninit(&gh); 665 gfs2_holder_uninit(&gh);
700} 666}
701 667
@@ -774,6 +740,7 @@ restart:
774 return; 740 return;
775 set_bit(HIF_DEMOTE, &new_gh->gh_iflags); 741 set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
776 set_bit(HIF_DEALLOC, &new_gh->gh_iflags); 742 set_bit(HIF_DEALLOC, &new_gh->gh_iflags);
743 set_bit(HIF_WAIT, &new_gh->gh_iflags);
777 744
778 goto restart; 745 goto restart;
779 } 746 }
@@ -825,7 +792,7 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
825 int op_done = 1; 792 int op_done = 1;
826 793
827 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); 794 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
828 gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders)); 795 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
829 gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC)); 796 gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));
830 797
831 state_change(gl, ret & LM_OUT_ST_MASK); 798 state_change(gl, ret & LM_OUT_ST_MASK);
@@ -908,12 +875,8 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
908 875
909 gfs2_glock_put(gl); 876 gfs2_glock_put(gl);
910 877
911 if (gh) { 878 if (gh)
912 if (test_bit(HIF_DEALLOC, &gh->gh_iflags)) 879 gfs2_holder_dispose_or_wake(gh);
913 gfs2_holder_put(gh);
914 else
915 complete(&gh->gh_wait);
916 }
917} 880}
918 881
919/** 882/**
@@ -924,23 +887,26 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
924 * 887 *
925 */ 888 */
926 889
927void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags) 890void gfs2_glock_xmote_th(struct gfs2_holder *gh)
928{ 891{
892 struct gfs2_glock *gl = gh->gh_gl;
929 struct gfs2_sbd *sdp = gl->gl_sbd; 893 struct gfs2_sbd *sdp = gl->gl_sbd;
894 int flags = gh->gh_flags;
895 unsigned state = gh->gh_state;
930 const struct gfs2_glock_operations *glops = gl->gl_ops; 896 const struct gfs2_glock_operations *glops = gl->gl_ops;
931 int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB | 897 int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
932 LM_FLAG_NOEXP | LM_FLAG_ANY | 898 LM_FLAG_NOEXP | LM_FLAG_ANY |
933 LM_FLAG_PRIORITY); 899 LM_FLAG_PRIORITY);
934 unsigned int lck_ret; 900 unsigned int lck_ret;
935 901
902 if (glops->go_xmote_th)
903 glops->go_xmote_th(gl);
904
936 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); 905 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
937 gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders)); 906 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
938 gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED); 907 gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
939 gfs2_assert_warn(sdp, state != gl->gl_state); 908 gfs2_assert_warn(sdp, state != gl->gl_state);
940 909
941 if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
942 glops->go_sync(gl);
943
944 gfs2_glock_hold(gl); 910 gfs2_glock_hold(gl);
945 gl->gl_req_bh = xmote_bh; 911 gl->gl_req_bh = xmote_bh;
946 912
@@ -971,10 +937,8 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
971 const struct gfs2_glock_operations *glops = gl->gl_ops; 937 const struct gfs2_glock_operations *glops = gl->gl_ops;
972 struct gfs2_holder *gh = gl->gl_req_gh; 938 struct gfs2_holder *gh = gl->gl_req_gh;
973 939
974 clear_bit(GLF_PREFETCH, &gl->gl_flags);
975
976 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); 940 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
977 gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders)); 941 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
978 gfs2_assert_warn(sdp, !ret); 942 gfs2_assert_warn(sdp, !ret);
979 943
980 state_change(gl, LM_ST_UNLOCKED); 944 state_change(gl, LM_ST_UNLOCKED);
@@ -1001,12 +965,8 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
1001 965
1002 gfs2_glock_put(gl); 966 gfs2_glock_put(gl);
1003 967
1004 if (gh) { 968 if (gh)
1005 if (test_bit(HIF_DEALLOC, &gh->gh_iflags)) 969 gfs2_holder_dispose_or_wake(gh);
1006 gfs2_holder_put(gh);
1007 else
1008 complete(&gh->gh_wait);
1009 }
1010} 970}
1011 971
1012/** 972/**
@@ -1015,19 +975,19 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
1015 * 975 *
1016 */ 976 */
1017 977
1018void gfs2_glock_drop_th(struct gfs2_glock *gl) 978static void gfs2_glock_drop_th(struct gfs2_glock *gl)
1019{ 979{
1020 struct gfs2_sbd *sdp = gl->gl_sbd; 980 struct gfs2_sbd *sdp = gl->gl_sbd;
1021 const struct gfs2_glock_operations *glops = gl->gl_ops; 981 const struct gfs2_glock_operations *glops = gl->gl_ops;
1022 unsigned int ret; 982 unsigned int ret;
1023 983
984 if (glops->go_drop_th)
985 glops->go_drop_th(gl);
986
1024 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); 987 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
1025 gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders)); 988 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
1026 gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED); 989 gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
1027 990
1028 if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
1029 glops->go_sync(gl);
1030
1031 gfs2_glock_hold(gl); 991 gfs2_glock_hold(gl);
1032 gl->gl_req_bh = drop_bh; 992 gl->gl_req_bh = drop_bh;
1033 993
@@ -1107,8 +1067,7 @@ static int glock_wait_internal(struct gfs2_holder *gh)
1107 if (gh->gh_flags & LM_FLAG_PRIORITY) 1067 if (gh->gh_flags & LM_FLAG_PRIORITY)
1108 do_cancels(gh); 1068 do_cancels(gh);
1109 1069
1110 wait_for_completion(&gh->gh_wait); 1070 wait_on_holder(gh);
1111
1112 if (gh->gh_error) 1071 if (gh->gh_error)
1113 return gh->gh_error; 1072 return gh->gh_error;
1114 1073
@@ -1164,6 +1123,8 @@ static void add_to_queue(struct gfs2_holder *gh)
1164 struct gfs2_holder *existing; 1123 struct gfs2_holder *existing;
1165 1124
1166 BUG_ON(!gh->gh_owner); 1125 BUG_ON(!gh->gh_owner);
1126 if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
1127 BUG();
1167 1128
1168 existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner); 1129 existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
1169 if (existing) { 1130 if (existing) {
@@ -1227,8 +1188,6 @@ restart:
1227 } 1188 }
1228 } 1189 }
1229 1190
1230 clear_bit(GLF_PREFETCH, &gl->gl_flags);
1231
1232 return error; 1191 return error;
1233} 1192}
1234 1193
@@ -1321,98 +1280,6 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
1321} 1280}
1322 1281
1323/** 1282/**
1324 * gfs2_glock_prefetch - Try to prefetch a glock
1325 * @gl: the glock
1326 * @state: the state to prefetch in
1327 * @flags: flags passed to go_xmote_th()
1328 *
1329 */
1330
1331static void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state,
1332 int flags)
1333{
1334 const struct gfs2_glock_operations *glops = gl->gl_ops;
1335
1336 spin_lock(&gl->gl_spin);
1337
1338 if (test_bit(GLF_LOCK, &gl->gl_flags) || !list_empty(&gl->gl_holders) ||
1339 !list_empty(&gl->gl_waiters1) || !list_empty(&gl->gl_waiters2) ||
1340 !list_empty(&gl->gl_waiters3) ||
1341 relaxed_state_ok(gl->gl_state, state, flags)) {
1342 spin_unlock(&gl->gl_spin);
1343 return;
1344 }
1345
1346 set_bit(GLF_PREFETCH, &gl->gl_flags);
1347 set_bit(GLF_LOCK, &gl->gl_flags);
1348 spin_unlock(&gl->gl_spin);
1349
1350 glops->go_xmote_th(gl, state, flags);
1351}
1352
1353static void greedy_work(struct work_struct *work)
1354{
1355 struct greedy *gr = container_of(work, struct greedy, gr_work.work);
1356 struct gfs2_holder *gh = &gr->gr_gh;
1357 struct gfs2_glock *gl = gh->gh_gl;
1358 const struct gfs2_glock_operations *glops = gl->gl_ops;
1359
1360 clear_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
1361
1362 if (glops->go_greedy)
1363 glops->go_greedy(gl);
1364
1365 spin_lock(&gl->gl_spin);
1366
1367 if (list_empty(&gl->gl_waiters2)) {
1368 clear_bit(GLF_GREEDY, &gl->gl_flags);
1369 spin_unlock(&gl->gl_spin);
1370 gfs2_holder_uninit(gh);
1371 kfree(gr);
1372 } else {
1373 gfs2_glock_hold(gl);
1374 list_add_tail(&gh->gh_list, &gl->gl_waiters2);
1375 run_queue(gl);
1376 spin_unlock(&gl->gl_spin);
1377 gfs2_glock_put(gl);
1378 }
1379}
1380
1381/**
1382 * gfs2_glock_be_greedy -
1383 * @gl:
1384 * @time:
1385 *
1386 * Returns: 0 if go_greedy will be called, 1 otherwise
1387 */
1388
1389int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
1390{
1391 struct greedy *gr;
1392 struct gfs2_holder *gh;
1393
1394 if (!time || gl->gl_sbd->sd_args.ar_localcaching ||
1395 test_and_set_bit(GLF_GREEDY, &gl->gl_flags))
1396 return 1;
1397
1398 gr = kmalloc(sizeof(struct greedy), GFP_KERNEL);
1399 if (!gr) {
1400 clear_bit(GLF_GREEDY, &gl->gl_flags);
1401 return 1;
1402 }
1403 gh = &gr->gr_gh;
1404
1405 gfs2_holder_init(gl, 0, 0, gh);
1406 set_bit(HIF_GREEDY, &gh->gh_iflags);
1407 INIT_DELAYED_WORK(&gr->gr_work, greedy_work);
1408
1409 set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
1410 schedule_delayed_work(&gr->gr_work, time);
1411
1412 return 0;
1413}
1414
1415/**
1416 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it 1283 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
1417 * @gh: the holder structure 1284 * @gh: the holder structure
1418 * 1285 *
@@ -1470,10 +1337,7 @@ static int glock_compare(const void *arg_a, const void *arg_b)
1470 return 1; 1337 return 1;
1471 if (a->ln_number < b->ln_number) 1338 if (a->ln_number < b->ln_number)
1472 return -1; 1339 return -1;
1473 if (gh_a->gh_state == LM_ST_SHARED && gh_b->gh_state == LM_ST_EXCLUSIVE) 1340 BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
1474 return 1;
1475 if (!(gh_a->gh_flags & GL_LOCAL_EXCL) && (gh_b->gh_flags & GL_LOCAL_EXCL))
1476 return 1;
1477 return 0; 1341 return 0;
1478} 1342}
1479 1343
@@ -1618,34 +1482,6 @@ void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1618} 1482}
1619 1483
1620/** 1484/**
1621 * gfs2_glock_prefetch_num - prefetch a glock based on lock number
1622 * @sdp: the filesystem
1623 * @number: the lock number
1624 * @glops: the glock operations for the type of glock
1625 * @state: the state to acquire the glock in
1626 * @flags: modifier flags for the aquisition
1627 *
1628 * Returns: errno
1629 */
1630
1631void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, u64 number,
1632 const struct gfs2_glock_operations *glops,
1633 unsigned int state, int flags)
1634{
1635 struct gfs2_glock *gl;
1636 int error;
1637
1638 if (atomic_read(&sdp->sd_reclaim_count) <
1639 gfs2_tune_get(sdp, gt_reclaim_limit)) {
1640 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1641 if (!error) {
1642 gfs2_glock_prefetch(gl, state, flags);
1643 gfs2_glock_put(gl);
1644 }
1645 }
1646}
1647
1648/**
1649 * gfs2_lvb_hold - attach a LVB from a glock 1485 * gfs2_lvb_hold - attach a LVB from a glock
1650 * @gl: The glock in question 1486 * @gl: The glock in question
1651 * 1487 *
@@ -1703,8 +1539,6 @@ static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
1703 if (!gl) 1539 if (!gl)
1704 return; 1540 return;
1705 1541
1706 if (gl->gl_ops->go_callback)
1707 gl->gl_ops->go_callback(gl, state);
1708 handle_callback(gl, state); 1542 handle_callback(gl, state);
1709 1543
1710 spin_lock(&gl->gl_spin); 1544 spin_lock(&gl->gl_spin);
@@ -1746,12 +1580,14 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
1746 struct lm_async_cb *async = data; 1580 struct lm_async_cb *async = data;
1747 struct gfs2_glock *gl; 1581 struct gfs2_glock *gl;
1748 1582
1583 down_read(&gfs2_umount_flush_sem);
1749 gl = gfs2_glock_find(sdp, &async->lc_name); 1584 gl = gfs2_glock_find(sdp, &async->lc_name);
1750 if (gfs2_assert_warn(sdp, gl)) 1585 if (gfs2_assert_warn(sdp, gl))
1751 return; 1586 return;
1752 if (!gfs2_assert_warn(sdp, gl->gl_req_bh)) 1587 if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
1753 gl->gl_req_bh(gl, async->lc_ret); 1588 gl->gl_req_bh(gl, async->lc_ret);
1754 gfs2_glock_put(gl); 1589 gfs2_glock_put(gl);
1590 up_read(&gfs2_umount_flush_sem);
1755 return; 1591 return;
1756 } 1592 }
1757 1593
@@ -1781,15 +1617,11 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
1781 1617
1782static int demote_ok(struct gfs2_glock *gl) 1618static int demote_ok(struct gfs2_glock *gl)
1783{ 1619{
1784 struct gfs2_sbd *sdp = gl->gl_sbd;
1785 const struct gfs2_glock_operations *glops = gl->gl_ops; 1620 const struct gfs2_glock_operations *glops = gl->gl_ops;
1786 int demote = 1; 1621 int demote = 1;
1787 1622
1788 if (test_bit(GLF_STICKY, &gl->gl_flags)) 1623 if (test_bit(GLF_STICKY, &gl->gl_flags))
1789 demote = 0; 1624 demote = 0;
1790 else if (test_bit(GLF_PREFETCH, &gl->gl_flags))
1791 demote = time_after_eq(jiffies, gl->gl_stamp +
1792 gfs2_tune_get(sdp, gt_prefetch_secs) * HZ);
1793 else if (glops->go_demote_ok) 1625 else if (glops->go_demote_ok)
1794 demote = glops->go_demote_ok(gl); 1626 demote = glops->go_demote_ok(gl);
1795 1627
@@ -1845,7 +1677,7 @@ void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
1845 atomic_inc(&sdp->sd_reclaimed); 1677 atomic_inc(&sdp->sd_reclaimed);
1846 1678
1847 if (gfs2_glmutex_trylock(gl)) { 1679 if (gfs2_glmutex_trylock(gl)) {
1848 if (queue_empty(gl, &gl->gl_holders) && 1680 if (list_empty(&gl->gl_holders) &&
1849 gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) 1681 gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1850 handle_callback(gl, LM_ST_UNLOCKED); 1682 handle_callback(gl, LM_ST_UNLOCKED);
1851 gfs2_glmutex_unlock(gl); 1683 gfs2_glmutex_unlock(gl);
@@ -1909,7 +1741,7 @@ static void scan_glock(struct gfs2_glock *gl)
1909 return; 1741 return;
1910 1742
1911 if (gfs2_glmutex_trylock(gl)) { 1743 if (gfs2_glmutex_trylock(gl)) {
1912 if (queue_empty(gl, &gl->gl_holders) && 1744 if (list_empty(&gl->gl_holders) &&
1913 gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) 1745 gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1914 goto out_schedule; 1746 goto out_schedule;
1915 gfs2_glmutex_unlock(gl); 1747 gfs2_glmutex_unlock(gl);
@@ -1958,7 +1790,7 @@ static void clear_glock(struct gfs2_glock *gl)
1958 } 1790 }
1959 1791
1960 if (gfs2_glmutex_trylock(gl)) { 1792 if (gfs2_glmutex_trylock(gl)) {
1961 if (queue_empty(gl, &gl->gl_holders) && 1793 if (list_empty(&gl->gl_holders) &&
1962 gl->gl_state != LM_ST_UNLOCKED) 1794 gl->gl_state != LM_ST_UNLOCKED)
1963 handle_callback(gl, LM_ST_UNLOCKED); 1795 handle_callback(gl, LM_ST_UNLOCKED);
1964 gfs2_glmutex_unlock(gl); 1796 gfs2_glmutex_unlock(gl);
@@ -2000,7 +1832,9 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
2000 t = jiffies; 1832 t = jiffies;
2001 } 1833 }
2002 1834
1835 down_write(&gfs2_umount_flush_sem);
2003 invalidate_inodes(sdp->sd_vfs); 1836 invalidate_inodes(sdp->sd_vfs);
1837 up_write(&gfs2_umount_flush_sem);
2004 msleep(10); 1838 msleep(10);
2005 } 1839 }
2006} 1840}
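The thread running through the glock.c hunks is the replacement of the per-holder struct completion with the new HIF_WAIT flag bit. Pulling the scattered pieces together, the wait/wake protocol introduced above amounts to the following condensed sketch (not standalone code; wait_on_bit() in this kernel still takes an action callback):

    /* Requester: mark the holder as waiting before queueing it, then sleep. */
    if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
            BUG();                  /* holder must not already be in flight */
    /* ... add gh to the appropriate gl_waiters list ... */
    wait_on_holder(gh);             /* wait_on_bit(&gh->gh_iflags, HIF_WAIT, ...) */

    /* Completer (rq_promote, rq_demote, xmote_bh, drop_bh): either free a
     * HIF_DEALLOC holder or clear the bit and wake the sleeper. */
    clear_bit(HIF_WAIT, &gh->gh_iflags);
    smp_mb();                       /* make the clear visible before waking */
    wake_up_bit(&gh->gh_iflags, HIF_WAIT);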
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index fb39108fc05c..f50e40ceca43 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -20,7 +20,6 @@
20#define LM_FLAG_ANY 0x00000008 20#define LM_FLAG_ANY 0x00000008
21#define LM_FLAG_PRIORITY 0x00000010 */ 21#define LM_FLAG_PRIORITY 0x00000010 */
22 22
23#define GL_LOCAL_EXCL 0x00000020
24#define GL_ASYNC 0x00000040 23#define GL_ASYNC 0x00000040
25#define GL_EXACT 0x00000080 24#define GL_EXACT 0x00000080
26#define GL_SKIP 0x00000100 25#define GL_SKIP 0x00000100
@@ -83,17 +82,11 @@ void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
83void gfs2_holder_reinit(unsigned int state, unsigned flags, 82void gfs2_holder_reinit(unsigned int state, unsigned flags,
84 struct gfs2_holder *gh); 83 struct gfs2_holder *gh);
85void gfs2_holder_uninit(struct gfs2_holder *gh); 84void gfs2_holder_uninit(struct gfs2_holder *gh);
86
87void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags);
88void gfs2_glock_drop_th(struct gfs2_glock *gl);
89
90int gfs2_glock_nq(struct gfs2_holder *gh); 85int gfs2_glock_nq(struct gfs2_holder *gh);
91int gfs2_glock_poll(struct gfs2_holder *gh); 86int gfs2_glock_poll(struct gfs2_holder *gh);
92int gfs2_glock_wait(struct gfs2_holder *gh); 87int gfs2_glock_wait(struct gfs2_holder *gh);
93void gfs2_glock_dq(struct gfs2_holder *gh); 88void gfs2_glock_dq(struct gfs2_holder *gh);
94 89
95int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time);
96
97void gfs2_glock_dq_uninit(struct gfs2_holder *gh); 90void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
98int gfs2_glock_nq_num(struct gfs2_sbd *sdp, 91int gfs2_glock_nq_num(struct gfs2_sbd *sdp,
99 u64 number, const struct gfs2_glock_operations *glops, 92 u64 number, const struct gfs2_glock_operations *glops,
@@ -103,10 +96,6 @@ int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
103void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs); 96void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
104void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs); 97void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs);
105 98
106void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, u64 number,
107 const struct gfs2_glock_operations *glops,
108 unsigned int state, int flags);
109
110/** 99/**
111 * gfs2_glock_nq_init - intialize a holder and enqueue it on a glock 100 * gfs2_glock_nq_init - intialize a holder and enqueue it on a glock
112 * @gl: the glock 101 * @gl: the glock
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index b068d10bcb6e..c4b0391b7aa2 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -117,12 +117,14 @@ static void gfs2_pte_inval(struct gfs2_glock *gl)
117 117
118static void meta_go_sync(struct gfs2_glock *gl) 118static void meta_go_sync(struct gfs2_glock *gl)
119{ 119{
120 if (gl->gl_state != LM_ST_EXCLUSIVE)
121 return;
122
120 if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) { 123 if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) {
121 gfs2_log_flush(gl->gl_sbd, gl); 124 gfs2_log_flush(gl->gl_sbd, gl);
122 gfs2_meta_sync(gl); 125 gfs2_meta_sync(gl);
123 gfs2_ail_empty_gl(gl); 126 gfs2_ail_empty_gl(gl);
124 } 127 }
125
126} 128}
127 129
128/** 130/**
@@ -142,6 +144,37 @@ static void meta_go_inval(struct gfs2_glock *gl, int flags)
142} 144}
143 145
144/** 146/**
147 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
148 * @gl: the glock protecting the inode
149 *
150 */
151
152static void inode_go_sync(struct gfs2_glock *gl)
153{
154 struct gfs2_inode *ip = gl->gl_object;
155
156 if (ip && !S_ISREG(ip->i_inode.i_mode))
157 ip = NULL;
158
159 if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
160 gfs2_log_flush(gl->gl_sbd, gl);
161 if (ip)
162 filemap_fdatawrite(ip->i_inode.i_mapping);
163 gfs2_meta_sync(gl);
164 if (ip) {
165 struct address_space *mapping = ip->i_inode.i_mapping;
166 int error = filemap_fdatawait(mapping);
167 if (error == -ENOSPC)
168 set_bit(AS_ENOSPC, &mapping->flags);
169 else if (error)
170 set_bit(AS_EIO, &mapping->flags);
171 }
172 clear_bit(GLF_DIRTY, &gl->gl_flags);
173 gfs2_ail_empty_gl(gl);
174 }
175}
176
177/**
145 * inode_go_xmote_th - promote/demote a glock 178 * inode_go_xmote_th - promote/demote a glock
146 * @gl: the glock 179 * @gl: the glock
147 * @state: the requested state 180 * @state: the requested state
@@ -149,12 +182,12 @@ static void meta_go_inval(struct gfs2_glock *gl, int flags)
149 * 182 *
150 */ 183 */
151 184
152static void inode_go_xmote_th(struct gfs2_glock *gl, unsigned int state, 185static void inode_go_xmote_th(struct gfs2_glock *gl)
153 int flags)
154{ 186{
155 if (gl->gl_state != LM_ST_UNLOCKED) 187 if (gl->gl_state != LM_ST_UNLOCKED)
156 gfs2_pte_inval(gl); 188 gfs2_pte_inval(gl);
157 gfs2_glock_xmote_th(gl, state, flags); 189 if (gl->gl_state == LM_ST_EXCLUSIVE)
190 inode_go_sync(gl);
158} 191}
159 192
160/** 193/**
@@ -189,38 +222,8 @@ static void inode_go_xmote_bh(struct gfs2_glock *gl)
189static void inode_go_drop_th(struct gfs2_glock *gl) 222static void inode_go_drop_th(struct gfs2_glock *gl)
190{ 223{
191 gfs2_pte_inval(gl); 224 gfs2_pte_inval(gl);
192 gfs2_glock_drop_th(gl); 225 if (gl->gl_state == LM_ST_EXCLUSIVE)
193} 226 inode_go_sync(gl);
194
195/**
196 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
197 * @gl: the glock protecting the inode
198 *
199 */
200
201static void inode_go_sync(struct gfs2_glock *gl)
202{
203 struct gfs2_inode *ip = gl->gl_object;
204
205 if (ip && !S_ISREG(ip->i_inode.i_mode))
206 ip = NULL;
207
208 if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
209 gfs2_log_flush(gl->gl_sbd, gl);
210 if (ip)
211 filemap_fdatawrite(ip->i_inode.i_mapping);
212 gfs2_meta_sync(gl);
213 if (ip) {
214 struct address_space *mapping = ip->i_inode.i_mapping;
215 int error = filemap_fdatawait(mapping);
216 if (error == -ENOSPC)
217 set_bit(AS_ENOSPC, &mapping->flags);
218 else if (error)
219 set_bit(AS_EIO, &mapping->flags);
220 }
221 clear_bit(GLF_DIRTY, &gl->gl_flags);
222 gfs2_ail_empty_gl(gl);
223 }
224} 227}
225 228
226/** 229/**
@@ -295,7 +298,7 @@ static int inode_go_lock(struct gfs2_holder *gh)
295 298
296 if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) && 299 if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) &&
297 (gl->gl_state == LM_ST_EXCLUSIVE) && 300 (gl->gl_state == LM_ST_EXCLUSIVE) &&
298 (gh->gh_flags & GL_LOCAL_EXCL)) 301 (gh->gh_state == LM_ST_EXCLUSIVE))
299 error = gfs2_truncatei_resume(ip); 302 error = gfs2_truncatei_resume(ip);
300 303
301 return error; 304 return error;
@@ -319,39 +322,6 @@ static void inode_go_unlock(struct gfs2_holder *gh)
319} 322}
320 323
321/** 324/**
322 * inode_greedy -
323 * @gl: the glock
324 *
325 */
326
327static void inode_greedy(struct gfs2_glock *gl)
328{
329 struct gfs2_sbd *sdp = gl->gl_sbd;
330 struct gfs2_inode *ip = gl->gl_object;
331 unsigned int quantum = gfs2_tune_get(sdp, gt_greedy_quantum);
332 unsigned int max = gfs2_tune_get(sdp, gt_greedy_max);
333 unsigned int new_time;
334
335 spin_lock(&ip->i_spin);
336
337 if (time_after(ip->i_last_pfault + quantum, jiffies)) {
338 new_time = ip->i_greedy + quantum;
339 if (new_time > max)
340 new_time = max;
341 } else {
342 new_time = ip->i_greedy - quantum;
343 if (!new_time || new_time > max)
344 new_time = 1;
345 }
346
347 ip->i_greedy = new_time;
348
349 spin_unlock(&ip->i_spin);
350
351 iput(&ip->i_inode);
352}
353
354/**
355 * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock 325 * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock
356 * @gl: the glock 326 * @gl: the glock
357 * 327 *
@@ -398,8 +368,7 @@ static void rgrp_go_unlock(struct gfs2_holder *gh)
398 * 368 *
399 */ 369 */
400 370
401static void trans_go_xmote_th(struct gfs2_glock *gl, unsigned int state, 371static void trans_go_xmote_th(struct gfs2_glock *gl)
402 int flags)
403{ 372{
404 struct gfs2_sbd *sdp = gl->gl_sbd; 373 struct gfs2_sbd *sdp = gl->gl_sbd;
405 374
@@ -408,8 +377,6 @@ static void trans_go_xmote_th(struct gfs2_glock *gl, unsigned int state,
408 gfs2_meta_syncfs(sdp); 377 gfs2_meta_syncfs(sdp);
409 gfs2_log_shutdown(sdp); 378 gfs2_log_shutdown(sdp);
410 } 379 }
411
412 gfs2_glock_xmote_th(gl, state, flags);
413} 380}
414 381
415/** 382/**
@@ -461,8 +428,6 @@ static void trans_go_drop_th(struct gfs2_glock *gl)
461 gfs2_meta_syncfs(sdp); 428 gfs2_meta_syncfs(sdp);
462 gfs2_log_shutdown(sdp); 429 gfs2_log_shutdown(sdp);
463 } 430 }
464
465 gfs2_glock_drop_th(gl);
466} 431}
467 432
468/** 433/**
@@ -478,8 +443,8 @@ static int quota_go_demote_ok(struct gfs2_glock *gl)
478} 443}
479 444
480const struct gfs2_glock_operations gfs2_meta_glops = { 445const struct gfs2_glock_operations gfs2_meta_glops = {
481 .go_xmote_th = gfs2_glock_xmote_th, 446 .go_xmote_th = meta_go_sync,
482 .go_drop_th = gfs2_glock_drop_th, 447 .go_drop_th = meta_go_sync,
483 .go_type = LM_TYPE_META, 448 .go_type = LM_TYPE_META,
484}; 449};
485 450
@@ -487,19 +452,14 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
487 .go_xmote_th = inode_go_xmote_th, 452 .go_xmote_th = inode_go_xmote_th,
488 .go_xmote_bh = inode_go_xmote_bh, 453 .go_xmote_bh = inode_go_xmote_bh,
489 .go_drop_th = inode_go_drop_th, 454 .go_drop_th = inode_go_drop_th,
490 .go_sync = inode_go_sync,
491 .go_inval = inode_go_inval, 455 .go_inval = inode_go_inval,
492 .go_demote_ok = inode_go_demote_ok, 456 .go_demote_ok = inode_go_demote_ok,
493 .go_lock = inode_go_lock, 457 .go_lock = inode_go_lock,
494 .go_unlock = inode_go_unlock, 458 .go_unlock = inode_go_unlock,
495 .go_greedy = inode_greedy,
496 .go_type = LM_TYPE_INODE, 459 .go_type = LM_TYPE_INODE,
497}; 460};
498 461
499const struct gfs2_glock_operations gfs2_rgrp_glops = { 462const struct gfs2_glock_operations gfs2_rgrp_glops = {
500 .go_xmote_th = gfs2_glock_xmote_th,
501 .go_drop_th = gfs2_glock_drop_th,
502 .go_sync = meta_go_sync,
503 .go_inval = meta_go_inval, 463 .go_inval = meta_go_inval,
504 .go_demote_ok = rgrp_go_demote_ok, 464 .go_demote_ok = rgrp_go_demote_ok,
505 .go_lock = rgrp_go_lock, 465 .go_lock = rgrp_go_lock,
@@ -515,33 +475,23 @@ const struct gfs2_glock_operations gfs2_trans_glops = {
515}; 475};
516 476
517const struct gfs2_glock_operations gfs2_iopen_glops = { 477const struct gfs2_glock_operations gfs2_iopen_glops = {
518 .go_xmote_th = gfs2_glock_xmote_th,
519 .go_drop_th = gfs2_glock_drop_th,
520 .go_type = LM_TYPE_IOPEN, 478 .go_type = LM_TYPE_IOPEN,
521}; 479};
522 480
523const struct gfs2_glock_operations gfs2_flock_glops = { 481const struct gfs2_glock_operations gfs2_flock_glops = {
524 .go_xmote_th = gfs2_glock_xmote_th,
525 .go_drop_th = gfs2_glock_drop_th,
526 .go_type = LM_TYPE_FLOCK, 482 .go_type = LM_TYPE_FLOCK,
527}; 483};
528 484
529const struct gfs2_glock_operations gfs2_nondisk_glops = { 485const struct gfs2_glock_operations gfs2_nondisk_glops = {
530 .go_xmote_th = gfs2_glock_xmote_th,
531 .go_drop_th = gfs2_glock_drop_th,
532 .go_type = LM_TYPE_NONDISK, 486 .go_type = LM_TYPE_NONDISK,
533}; 487};
534 488
535const struct gfs2_glock_operations gfs2_quota_glops = { 489const struct gfs2_glock_operations gfs2_quota_glops = {
536 .go_xmote_th = gfs2_glock_xmote_th,
537 .go_drop_th = gfs2_glock_drop_th,
538 .go_demote_ok = quota_go_demote_ok, 490 .go_demote_ok = quota_go_demote_ok,
539 .go_type = LM_TYPE_QUOTA, 491 .go_type = LM_TYPE_QUOTA,
540}; 492};
541 493
542const struct gfs2_glock_operations gfs2_journal_glops = { 494const struct gfs2_glock_operations gfs2_journal_glops = {
543 .go_xmote_th = gfs2_glock_xmote_th,
544 .go_drop_th = gfs2_glock_drop_th,
545 .go_type = LM_TYPE_JOURNAL, 495 .go_type = LM_TYPE_JOURNAL,
546}; 496};
547 497
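Together with the glock.c changes, these hunks shrink go_xmote_th and go_drop_th from full state-transition entry points into optional per-type pre-hooks and fold the old go_sync step into them; the generic lock request is now issued by glock.c itself. A simplified sketch of the resulting dispatch, with the unchanged request-issuing code elided:

    static void gfs2_glock_xmote_th(struct gfs2_holder *gh)
    {
            struct gfs2_glock *gl = gh->gh_gl;
            const struct gfs2_glock_operations *glops = gl->gl_ops;

            if (glops->go_xmote_th)         /* e.g. inode_go_xmote_th(): invalidate
                                             * PTEs and sync dirty data/metadata */
                    glops->go_xmote_th(gl);

            /* ... generic code then asks the lock module for gh->gh_state ... */
    }

    static void gfs2_glock_drop_th(struct gfs2_glock *gl)
    {
            const struct gfs2_glock_operations *glops = gl->gl_ops;

            if (glops->go_drop_th)          /* e.g. meta_go_sync() for the meta type */
                    glops->go_drop_th(gl);

            /* ... generic code then drops the lock ... */
    }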
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 734421edae85..12c80fd28db5 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -101,17 +101,14 @@ struct gfs2_bufdata {
101}; 101};
102 102
103struct gfs2_glock_operations { 103struct gfs2_glock_operations {
104 void (*go_xmote_th) (struct gfs2_glock *gl, unsigned int state, int flags); 104 void (*go_xmote_th) (struct gfs2_glock *gl);
105 void (*go_xmote_bh) (struct gfs2_glock *gl); 105 void (*go_xmote_bh) (struct gfs2_glock *gl);
106 void (*go_drop_th) (struct gfs2_glock *gl); 106 void (*go_drop_th) (struct gfs2_glock *gl);
107 void (*go_drop_bh) (struct gfs2_glock *gl); 107 void (*go_drop_bh) (struct gfs2_glock *gl);
108 void (*go_sync) (struct gfs2_glock *gl);
109 void (*go_inval) (struct gfs2_glock *gl, int flags); 108 void (*go_inval) (struct gfs2_glock *gl, int flags);
110 int (*go_demote_ok) (struct gfs2_glock *gl); 109 int (*go_demote_ok) (struct gfs2_glock *gl);
111 int (*go_lock) (struct gfs2_holder *gh); 110 int (*go_lock) (struct gfs2_holder *gh);
112 void (*go_unlock) (struct gfs2_holder *gh); 111 void (*go_unlock) (struct gfs2_holder *gh);
113 void (*go_callback) (struct gfs2_glock *gl, unsigned int state);
114 void (*go_greedy) (struct gfs2_glock *gl);
115 const int go_type; 112 const int go_type;
116}; 113};
117 114
@@ -120,7 +117,6 @@ enum {
120 HIF_MUTEX = 0, 117 HIF_MUTEX = 0,
121 HIF_PROMOTE = 1, 118 HIF_PROMOTE = 1,
122 HIF_DEMOTE = 2, 119 HIF_DEMOTE = 2,
123 HIF_GREEDY = 3,
124 120
125 /* States */ 121 /* States */
126 HIF_ALLOCED = 4, 122 HIF_ALLOCED = 4,
@@ -128,6 +124,7 @@ enum {
128 HIF_HOLDER = 6, 124 HIF_HOLDER = 6,
129 HIF_FIRST = 7, 125 HIF_FIRST = 7,
130 HIF_ABORTED = 9, 126 HIF_ABORTED = 9,
127 HIF_WAIT = 10,
131}; 128};
132 129
133struct gfs2_holder { 130struct gfs2_holder {
@@ -140,17 +137,14 @@ struct gfs2_holder {
140 137
141 int gh_error; 138 int gh_error;
142 unsigned long gh_iflags; 139 unsigned long gh_iflags;
143 struct completion gh_wait;
144 unsigned long gh_ip; 140 unsigned long gh_ip;
145}; 141};
146 142
147enum { 143enum {
148 GLF_LOCK = 1, 144 GLF_LOCK = 1,
149 GLF_STICKY = 2, 145 GLF_STICKY = 2,
150 GLF_PREFETCH = 3,
151 GLF_DIRTY = 5, 146 GLF_DIRTY = 5,
152 GLF_SKIP_WAITERS2 = 6, 147 GLF_SKIP_WAITERS2 = 6,
153 GLF_GREEDY = 7,
154}; 148};
155 149
156struct gfs2_glock { 150struct gfs2_glock {
@@ -167,7 +161,7 @@ struct gfs2_glock {
167 unsigned long gl_ip; 161 unsigned long gl_ip;
168 struct list_head gl_holders; 162 struct list_head gl_holders;
169 struct list_head gl_waiters1; /* HIF_MUTEX */ 163 struct list_head gl_waiters1; /* HIF_MUTEX */
170 struct list_head gl_waiters2; /* HIF_DEMOTE, HIF_GREEDY */ 164 struct list_head gl_waiters2; /* HIF_DEMOTE */
171 struct list_head gl_waiters3; /* HIF_PROMOTE */ 165 struct list_head gl_waiters3; /* HIF_PROMOTE */
172 166
173 const struct gfs2_glock_operations *gl_ops; 167 const struct gfs2_glock_operations *gl_ops;
@@ -236,7 +230,6 @@ struct gfs2_inode {
236 230
237 spinlock_t i_spin; 231 spinlock_t i_spin;
238 struct rw_semaphore i_rw_mutex; 232 struct rw_semaphore i_rw_mutex;
239 unsigned int i_greedy;
240 unsigned long i_last_pfault; 233 unsigned long i_last_pfault;
241 234
242 struct buffer_head *i_cache[GFS2_MAX_META_HEIGHT]; 235 struct buffer_head *i_cache[GFS2_MAX_META_HEIGHT];
@@ -418,17 +411,12 @@ struct gfs2_tune {
418 unsigned int gt_atime_quantum; /* Min secs between atime updates */ 411 unsigned int gt_atime_quantum; /* Min secs between atime updates */
419 unsigned int gt_new_files_jdata; 412 unsigned int gt_new_files_jdata;
420 unsigned int gt_new_files_directio; 413 unsigned int gt_new_files_directio;
421 unsigned int gt_max_atomic_write; /* Split big writes into this size */
422 unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */ 414 unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */
423 unsigned int gt_lockdump_size; 415 unsigned int gt_lockdump_size;
424 unsigned int gt_stall_secs; /* Detects trouble! */ 416 unsigned int gt_stall_secs; /* Detects trouble! */
425 unsigned int gt_complain_secs; 417 unsigned int gt_complain_secs;
426 unsigned int gt_reclaim_limit; /* Max num of glocks in reclaim list */ 418 unsigned int gt_reclaim_limit; /* Max num of glocks in reclaim list */
427 unsigned int gt_entries_per_readdir; 419 unsigned int gt_entries_per_readdir;
428 unsigned int gt_prefetch_secs; /* Usage window for prefetched glocks */
429 unsigned int gt_greedy_default;
430 unsigned int gt_greedy_quantum;
431 unsigned int gt_greedy_max;
432 unsigned int gt_statfs_quantum; 420 unsigned int gt_statfs_quantum;
433 unsigned int gt_statfs_slow; 421 unsigned int gt_statfs_slow;
434}; 422};
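
The incore.h hunk above trims struct gfs2_glock_operations: go_sync, go_callback and go_greedy disappear, and go_xmote_th loses its state/flags arguments, leaving a smaller per-lock-type operations table. As a rough stand-alone illustration of that pattern only (not GFS2 code), the sketch below wires a const table of function pointers to an object and calls through it; every identifier in it (demo_ops, demo_lock, inode_xmote, ...) is invented for the example.

/* Stand-alone sketch of a per-type ops table, loosely modelled on
 * struct gfs2_glock_operations.  All identifiers are invented. */
#include <stdio.h>

struct demo_lock;

struct demo_ops {
        void (*go_xmote_th)(struct demo_lock *gl); /* promote the lock  */
        void (*go_drop_th)(struct demo_lock *gl);  /* demote the lock   */
        int  (*go_demote_ok)(struct demo_lock *gl);
        const int go_type;
};

struct demo_lock {
        const struct demo_ops *ops;
        int state;
};

static void inode_xmote(struct demo_lock *gl) { gl->state = 1; puts("promoted"); }
static void inode_drop(struct demo_lock *gl)  { gl->state = 0; puts("demoted"); }
static int  inode_demote_ok(struct demo_lock *gl) { return gl->state == 0; }

static const struct demo_ops demo_inode_ops = {
        .go_xmote_th  = inode_xmote,
        .go_drop_th   = inode_drop,
        .go_demote_ok = inode_demote_ok,
        .go_type      = 2, /* stands in for a lock type such as LM_TYPE_INODE */
};

int main(void)
{
        struct demo_lock gl = { .ops = &demo_inode_ops, .state = 0 };

        gl.ops->go_xmote_th(&gl);                      /* take the lock "up"  */
        printf("demote ok? %d\n", gl.ops->go_demote_ok(&gl));
        gl.ops->go_drop_th(&gl);                       /* and back "down"     */
        printf("demote ok? %d\n", gl.ops->go_demote_ok(&gl));
        return 0;
}
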
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index d122074c45e1..0d6831a40565 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -287,10 +287,8 @@ out:
287 * 287 *
288 * Returns: errno 288 * Returns: errno
289 */ 289 */
290
291int gfs2_change_nlink(struct gfs2_inode *ip, int diff) 290int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
292{ 291{
293 struct gfs2_sbd *sdp = ip->i_inode.i_sb->s_fs_info;
294 struct buffer_head *dibh; 292 struct buffer_head *dibh;
295 u32 nlink; 293 u32 nlink;
296 int error; 294 int error;
@@ -315,42 +313,34 @@ int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
315 else 313 else
316 drop_nlink(&ip->i_inode); 314 drop_nlink(&ip->i_inode);
317 315
318 ip->i_inode.i_ctime.tv_sec = get_seconds(); 316 ip->i_inode.i_ctime = CURRENT_TIME_SEC;
319 317
320 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 318 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
321 gfs2_dinode_out(ip, dibh->b_data); 319 gfs2_dinode_out(ip, dibh->b_data);
322 brelse(dibh); 320 brelse(dibh);
323 mark_inode_dirty(&ip->i_inode); 321 mark_inode_dirty(&ip->i_inode);
324 322
325 if (ip->i_inode.i_nlink == 0) { 323 if (ip->i_inode.i_nlink == 0)
326 struct gfs2_rgrpd *rgd;
327 struct gfs2_holder ri_gh, rg_gh;
328
329 error = gfs2_rindex_hold(sdp, &ri_gh);
330 if (error)
331 goto out;
332 error = -EIO;
333 rgd = gfs2_blk2rgrpd(sdp, ip->i_num.no_addr);
334 if (!rgd)
335 goto out_norgrp;
336 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
337 if (error)
338 goto out_norgrp;
339
340 gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */ 324 gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */
341 gfs2_glock_dq_uninit(&rg_gh); 325
342out_norgrp:
343 gfs2_glock_dq_uninit(&ri_gh);
344 }
345out:
346 return error; 326 return error;
347} 327}
348 328
349struct inode *gfs2_lookup_simple(struct inode *dip, const char *name) 329struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
350{ 330{
351 struct qstr qstr; 331 struct qstr qstr;
332 struct inode *inode;
352 gfs2_str2qstr(&qstr, name); 333 gfs2_str2qstr(&qstr, name);
353 return gfs2_lookupi(dip, &qstr, 1, NULL); 334 inode = gfs2_lookupi(dip, &qstr, 1, NULL);
335 /* gfs2_lookupi has inconsistent callers: vfs
336 * related routines expect NULL for no entry found,
337 * gfs2_lookup_simple callers expect ENOENT
338 * and do not check for NULL.
339 */
340 if (inode == NULL)
341 return ERR_PTR(-ENOENT);
342 else
343 return inode;
354} 344}
355 345
356 346
@@ -361,8 +351,10 @@ struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
361 * @is_root: If 1, ignore the caller's permissions 351 * @is_root: If 1, ignore the caller's permissions
362 * @i_gh: An uninitialized holder for the new inode glock 352 * @i_gh: An uninitialized holder for the new inode glock
363 * 353 *
364 * There will always be a vnode (Linux VFS inode) for the d_gh inode unless 354 * This can be called via the VFS filldir function when NFS is doing
365 * @is_root is true. 355 * a readdirplus and the inode which its intending to stat isn't
356 * already in cache. In this case we must not take the directory glock
357 * again, since the readdir call will have already taken that lock.
366 * 358 *
367 * Returns: errno 359 * Returns: errno
368 */ 360 */
@@ -375,8 +367,9 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
375 struct gfs2_holder d_gh; 367 struct gfs2_holder d_gh;
376 struct gfs2_inum_host inum; 368 struct gfs2_inum_host inum;
377 unsigned int type; 369 unsigned int type;
378 int error = 0; 370 int error;
379 struct inode *inode = NULL; 371 struct inode *inode = NULL;
372 int unlock = 0;
380 373
381 if (!name->len || name->len > GFS2_FNAMESIZE) 374 if (!name->len || name->len > GFS2_FNAMESIZE)
382 return ERR_PTR(-ENAMETOOLONG); 375 return ERR_PTR(-ENAMETOOLONG);
@@ -388,9 +381,12 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
388 return dir; 381 return dir;
389 } 382 }
390 383
391 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh); 384 if (gfs2_glock_is_locked_by_me(dip->i_gl) == 0) {
392 if (error) 385 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
393 return ERR_PTR(error); 386 if (error)
387 return ERR_PTR(error);
388 unlock = 1;
389 }
394 390
395 if (!is_root) { 391 if (!is_root) {
396 error = permission(dir, MAY_EXEC, NULL); 392 error = permission(dir, MAY_EXEC, NULL);
@@ -405,10 +401,11 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
405 inode = gfs2_inode_lookup(sb, &inum, type); 401 inode = gfs2_inode_lookup(sb, &inum, type);
406 402
407out: 403out:
408 gfs2_glock_dq_uninit(&d_gh); 404 if (unlock)
405 gfs2_glock_dq_uninit(&d_gh);
409 if (error == -ENOENT) 406 if (error == -ENOENT)
410 return NULL; 407 return NULL;
411 return inode; 408 return inode ? inode : ERR_PTR(error);
412} 409}
413 410
414static int pick_formal_ino_1(struct gfs2_sbd *sdp, u64 *formal_ino) 411static int pick_formal_ino_1(struct gfs2_sbd *sdp, u64 *formal_ino)
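
The gfs2_lookup_simple() change above reconciles two return conventions that its own comment spells out: gfs2_lookupi() reports "no entry" as NULL, while gfs2_lookup_simple() callers expect ERR_PTR(-ENOENT) and never check for NULL. Below is a minimal user-space imitation of that ERR_PTR/IS_ERR encoding, assuming the usual trick of packing small negative errno values into the top of the pointer range; the helpers mirror the kernel's names, but this is a sketch, not the kernel implementation, and lookupi()/lookup_simple() here are toy stand-ins.

/* Sketch of the ERR_PTR/IS_ERR convention used by gfs2_lookup_simple
 * above: "not found" comes back as ERR_PTR(-ENOENT) rather than NULL.
 * Simplified user-space imitation, not kernel code. */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)       { return (void *)error; }
static inline long  PTR_ERR(const void *ptr)  { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Pretend lookup: returns NULL when the name is not present. */
static void *lookupi(const char *name)
{
        static int the_inode = 42;
        return name[0] == '.' ? NULL : &the_inode;
}

/* Wrapper with gfs2_lookup_simple's convention. */
static void *lookup_simple(const char *name)
{
        void *inode = lookupi(name);
        if (inode == NULL)
                return ERR_PTR(-ENOENT);
        return inode;
}

int main(void)
{
        void *inode = lookup_simple(".missing");
        if (IS_ERR(inode))
                printf("lookup failed: %ld\n", PTR_ERR(inode));
        else
                printf("found inode at %p\n", inode);
        return 0;
}
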
diff --git a/fs/gfs2/lm.c b/fs/gfs2/lm.c
index effe4a337c1d..e30673dd37e0 100644
--- a/fs/gfs2/lm.c
+++ b/fs/gfs2/lm.c
@@ -104,15 +104,9 @@ int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...)
104 vprintk(fmt, args); 104 vprintk(fmt, args);
105 va_end(args); 105 va_end(args);
106 106
107 fs_err(sdp, "about to withdraw from the cluster\n"); 107 fs_err(sdp, "about to withdraw this file system\n");
108 BUG_ON(sdp->sd_args.ar_debug); 108 BUG_ON(sdp->sd_args.ar_debug);
109 109
110
111 fs_err(sdp, "waiting for outstanding I/O\n");
112
113 /* FIXME: suspend dm device so oustanding bio's complete
114 and all further io requests fail */
115
116 fs_err(sdp, "telling LM to withdraw\n"); 110 fs_err(sdp, "telling LM to withdraw\n");
117 gfs2_withdraw_lockproto(&sdp->sd_lockstruct); 111 gfs2_withdraw_lockproto(&sdp->sd_lockstruct);
118 fs_err(sdp, "withdrawn\n"); 112 fs_err(sdp, "withdrawn\n");
diff --git a/fs/gfs2/locking/dlm/lock_dlm.h b/fs/gfs2/locking/dlm/lock_dlm.h
index 33af707a4d3f..a87c7bf3c568 100644
--- a/fs/gfs2/locking/dlm/lock_dlm.h
+++ b/fs/gfs2/locking/dlm/lock_dlm.h
@@ -36,7 +36,7 @@
36 36
37#define GDLM_STRNAME_BYTES 24 37#define GDLM_STRNAME_BYTES 24
38#define GDLM_LVB_SIZE 32 38#define GDLM_LVB_SIZE 32
39#define GDLM_DROP_COUNT 50000 39#define GDLM_DROP_COUNT 200000
40#define GDLM_DROP_PERIOD 60 40#define GDLM_DROP_PERIOD 60
41#define GDLM_NAME_LEN 128 41#define GDLM_NAME_LEN 128
42 42
diff --git a/fs/gfs2/locking/dlm/main.c b/fs/gfs2/locking/dlm/main.c
index 2194b1d5b5ec..a0e7eda643ed 100644
--- a/fs/gfs2/locking/dlm/main.c
+++ b/fs/gfs2/locking/dlm/main.c
@@ -11,9 +11,6 @@
11 11
12#include "lock_dlm.h" 12#include "lock_dlm.h"
13 13
14extern int gdlm_drop_count;
15extern int gdlm_drop_period;
16
17extern struct lm_lockops gdlm_ops; 14extern struct lm_lockops gdlm_ops;
18 15
19static int __init init_lock_dlm(void) 16static int __init init_lock_dlm(void)
@@ -40,9 +37,6 @@ static int __init init_lock_dlm(void)
40 return error; 37 return error;
41 } 38 }
42 39
43 gdlm_drop_count = GDLM_DROP_COUNT;
44 gdlm_drop_period = GDLM_DROP_PERIOD;
45
46 printk(KERN_INFO 40 printk(KERN_INFO
47 "Lock_DLM (built %s %s) installed\n", __DATE__, __TIME__); 41 "Lock_DLM (built %s %s) installed\n", __DATE__, __TIME__);
48 return 0; 42 return 0;
diff --git a/fs/gfs2/locking/dlm/mount.c b/fs/gfs2/locking/dlm/mount.c
index cdd1694e889b..1d8faa3da8af 100644
--- a/fs/gfs2/locking/dlm/mount.c
+++ b/fs/gfs2/locking/dlm/mount.c
@@ -9,8 +9,6 @@
9 9
10#include "lock_dlm.h" 10#include "lock_dlm.h"
11 11
12int gdlm_drop_count;
13int gdlm_drop_period;
14const struct lm_lockops gdlm_ops; 12const struct lm_lockops gdlm_ops;
15 13
16 14
@@ -24,8 +22,8 @@ static struct gdlm_ls *init_gdlm(lm_callback_t cb, struct gfs2_sbd *sdp,
24 if (!ls) 22 if (!ls)
25 return NULL; 23 return NULL;
26 24
27 ls->drop_locks_count = gdlm_drop_count; 25 ls->drop_locks_count = GDLM_DROP_COUNT;
28 ls->drop_locks_period = gdlm_drop_period; 26 ls->drop_locks_period = GDLM_DROP_PERIOD;
29 ls->fscb = cb; 27 ls->fscb = cb;
30 ls->sdp = sdp; 28 ls->sdp = sdp;
31 ls->fsflags = flags; 29 ls->fsflags = flags;
diff --git a/fs/gfs2/locking/dlm/plock.c b/fs/gfs2/locking/dlm/plock.c
index 3799f19b282f..1dd4215b83d0 100644
--- a/fs/gfs2/locking/dlm/plock.c
+++ b/fs/gfs2/locking/dlm/plock.c
@@ -264,7 +264,7 @@ static unsigned int dev_poll(struct file *file, poll_table *wait)
264 return 0; 264 return 0;
265} 265}
266 266
267static struct file_operations dev_fops = { 267static const struct file_operations dev_fops = {
268 .read = dev_read, 268 .read = dev_read,
269 .write = dev_write, 269 .write = dev_write,
270 .poll = dev_poll, 270 .poll = dev_poll,
diff --git a/fs/gfs2/locking/dlm/sysfs.c b/fs/gfs2/locking/dlm/sysfs.c
index 29ae06f94944..4746b884662d 100644
--- a/fs/gfs2/locking/dlm/sysfs.c
+++ b/fs/gfs2/locking/dlm/sysfs.c
@@ -116,6 +116,17 @@ static ssize_t recover_status_show(struct gdlm_ls *ls, char *buf)
116 return sprintf(buf, "%d\n", ls->recover_jid_status); 116 return sprintf(buf, "%d\n", ls->recover_jid_status);
117} 117}
118 118
119static ssize_t drop_count_show(struct gdlm_ls *ls, char *buf)
120{
121 return sprintf(buf, "%d\n", ls->drop_locks_count);
122}
123
124static ssize_t drop_count_store(struct gdlm_ls *ls, const char *buf, size_t len)
125{
126 ls->drop_locks_count = simple_strtol(buf, NULL, 0);
127 return len;
128}
129
119struct gdlm_attr { 130struct gdlm_attr {
120 struct attribute attr; 131 struct attribute attr;
121 ssize_t (*show)(struct gdlm_ls *, char *); 132 ssize_t (*show)(struct gdlm_ls *, char *);
@@ -135,6 +146,7 @@ GDLM_ATTR(first_done, 0444, first_done_show, NULL);
135GDLM_ATTR(recover, 0644, recover_show, recover_store); 146GDLM_ATTR(recover, 0644, recover_show, recover_store);
136GDLM_ATTR(recover_done, 0444, recover_done_show, NULL); 147GDLM_ATTR(recover_done, 0444, recover_done_show, NULL);
137GDLM_ATTR(recover_status, 0444, recover_status_show, NULL); 148GDLM_ATTR(recover_status, 0444, recover_status_show, NULL);
149GDLM_ATTR(drop_count, 0644, drop_count_show, drop_count_store);
138 150
139static struct attribute *gdlm_attrs[] = { 151static struct attribute *gdlm_attrs[] = {
140 &gdlm_attr_proto_name.attr, 152 &gdlm_attr_proto_name.attr,
@@ -147,6 +159,7 @@ static struct attribute *gdlm_attrs[] = {
147 &gdlm_attr_recover.attr, 159 &gdlm_attr_recover.attr,
148 &gdlm_attr_recover_done.attr, 160 &gdlm_attr_recover_done.attr,
149 &gdlm_attr_recover_status.attr, 161 &gdlm_attr_recover_status.attr,
162 &gdlm_attr_drop_count.attr,
150 NULL, 163 NULL,
151}; 164};
152 165
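
The sysfs.c hunk above exposes the per-lockspace drop_locks_count through a new drop_count attribute with the usual show/store pair: show formats the current value, store parses a replacement with simple_strtol(). A small user-space sketch of the same shape follows, with strtol() standing in for simple_strtol(); struct demo_ls and the surrounding names are made up for the example.

/* User-space sketch of the show/store pair added above for the
 * drop_count attribute: show prints the current value, store parses
 * a new one.  strtol() approximates simple_strtol() here. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

struct demo_ls {
        int drop_locks_count;
};

static ssize_t drop_count_show(struct demo_ls *ls, char *buf)
{
        return sprintf(buf, "%d\n", ls->drop_locks_count);
}

static ssize_t drop_count_store(struct demo_ls *ls, const char *buf, size_t len)
{
        ls->drop_locks_count = strtol(buf, NULL, 0);
        return len;
}

int main(void)
{
        struct demo_ls ls = { .drop_locks_count = 200000 };
        char buf[32];

        drop_count_show(&ls, buf);
        printf("before: %s", buf);

        drop_count_store(&ls, "50000\n", strlen("50000\n"));
        drop_count_show(&ls, buf);
        printf("after:  %s", buf);
        return 0;
}
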
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 4d7f94d8c7bd..16bb4b4561ae 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -69,13 +69,16 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
69 struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le); 69 struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
70 struct gfs2_trans *tr; 70 struct gfs2_trans *tr;
71 71
72 if (!list_empty(&bd->bd_list_tr)) 72 gfs2_log_lock(sdp);
73 if (!list_empty(&bd->bd_list_tr)) {
74 gfs2_log_unlock(sdp);
73 return; 75 return;
74 76 }
75 tr = current->journal_info; 77 tr = current->journal_info;
76 tr->tr_touched = 1; 78 tr->tr_touched = 1;
77 tr->tr_num_buf++; 79 tr->tr_num_buf++;
78 list_add(&bd->bd_list_tr, &tr->tr_list_buf); 80 list_add(&bd->bd_list_tr, &tr->tr_list_buf);
81 gfs2_log_unlock(sdp);
79 82
80 if (!list_empty(&le->le_list)) 83 if (!list_empty(&le->le_list))
81 return; 84 return;
@@ -84,7 +87,6 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
84 87
85 gfs2_meta_check(sdp, bd->bd_bh); 88 gfs2_meta_check(sdp, bd->bd_bh);
86 gfs2_pin(sdp, bd->bd_bh); 89 gfs2_pin(sdp, bd->bd_bh);
87
88 gfs2_log_lock(sdp); 90 gfs2_log_lock(sdp);
89 sdp->sd_log_num_buf++; 91 sdp->sd_log_num_buf++;
90 list_add(&le->le_list, &sdp->sd_log_le_buf); 92 list_add(&le->le_list, &sdp->sd_log_le_buf);
@@ -98,11 +100,13 @@ static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
98 struct list_head *head = &tr->tr_list_buf; 100 struct list_head *head = &tr->tr_list_buf;
99 struct gfs2_bufdata *bd; 101 struct gfs2_bufdata *bd;
100 102
103 gfs2_log_lock(sdp);
101 while (!list_empty(head)) { 104 while (!list_empty(head)) {
102 bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr); 105 bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
103 list_del_init(&bd->bd_list_tr); 106 list_del_init(&bd->bd_list_tr);
104 tr->tr_num_buf--; 107 tr->tr_num_buf--;
105 } 108 }
109 gfs2_log_unlock(sdp);
106 gfs2_assert_warn(sdp, !tr->tr_num_buf); 110 gfs2_assert_warn(sdp, !tr->tr_num_buf);
107} 111}
108 112
@@ -462,13 +466,17 @@ static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
462 struct address_space *mapping = bd->bd_bh->b_page->mapping; 466 struct address_space *mapping = bd->bd_bh->b_page->mapping;
463 struct gfs2_inode *ip = GFS2_I(mapping->host); 467 struct gfs2_inode *ip = GFS2_I(mapping->host);
464 468
469 gfs2_log_lock(sdp);
465 tr->tr_touched = 1; 470 tr->tr_touched = 1;
466 if (list_empty(&bd->bd_list_tr) && 471 if (list_empty(&bd->bd_list_tr) &&
467 (ip->i_di.di_flags & GFS2_DIF_JDATA)) { 472 (ip->i_di.di_flags & GFS2_DIF_JDATA)) {
468 tr->tr_num_buf++; 473 tr->tr_num_buf++;
469 list_add(&bd->bd_list_tr, &tr->tr_list_buf); 474 list_add(&bd->bd_list_tr, &tr->tr_list_buf);
475 gfs2_log_unlock(sdp);
470 gfs2_pin(sdp, bd->bd_bh); 476 gfs2_pin(sdp, bd->bd_bh);
471 tr->tr_num_buf_new++; 477 tr->tr_num_buf_new++;
478 } else {
479 gfs2_log_unlock(sdp);
472 } 480 }
473 gfs2_trans_add_gl(bd->bd_gl); 481 gfs2_trans_add_gl(bd->bd_gl);
474 gfs2_log_lock(sdp); 482 gfs2_log_lock(sdp);
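
The lops.c hunks above move the list_empty()/list_add() pairs under gfs2_log_lock(), which suggests the point is to make the "add this buffer to the transaction list exactly once" test atomic with the insertion. The sketch below shows that check-then-insert pattern with a plain pthread mutex standing in for the log lock; the list and all names are invented, and the program prints 1 however the two threads race.

/* Sketch of the locking pattern the lops.c hunks above introduce: the
 * "is it already on the list?" test and the insertion must happen under
 * the same lock, or two racing callers can both see the node as absent
 * and add it twice.  A pthread mutex stands in for gfs2_log_lock(). */
#include <pthread.h>
#include <stdio.h>

struct node {
        struct node *next;
        int on_list;
};

static struct node *head;
static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;

/* Add bd to the transaction list exactly once. */
static void buf_add(struct node *bd)
{
        pthread_mutex_lock(&log_lock);          /* gfs2_log_lock(sdp)   */
        if (bd->on_list) {                      /* !list_empty(...)     */
                pthread_mutex_unlock(&log_lock);
                return;
        }
        bd->on_list = 1;
        bd->next = head;
        head = bd;
        pthread_mutex_unlock(&log_lock);        /* gfs2_log_unlock(sdp) */
}

static void *worker(void *arg)
{
        buf_add(arg);                           /* both threads race here */
        return NULL;
}

int main(void)
{
        struct node bd = { 0 };
        pthread_t a, b;
        int count = 0;

        pthread_create(&a, NULL, worker, &bd);
        pthread_create(&b, NULL, worker, &bd);
        pthread_join(a, NULL);
        pthread_join(b, NULL);

        for (struct node *n = head; n; n = n->next)
                count++;
        printf("node added %d time(s)\n", count);   /* always 1 */
        return 0;
}
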
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 0e34d9918973..e62d4f620c58 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -282,8 +282,7 @@ void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
282 return; 282 return;
283 } 283 }
284 284
285 bd = kmem_cache_alloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL), 285 bd = kmem_cache_zalloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL),
286 memset(bd, 0, sizeof(struct gfs2_bufdata));
287 bd->bd_bh = bh; 286 bd->bd_bh = bh;
288 bd->bd_gl = gl; 287 bd->bd_gl = gl;
289 288
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index d8d69a72a10d..56e33590b656 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -16,6 +16,7 @@
16#include <linux/pagevec.h> 16#include <linux/pagevec.h>
17#include <linux/mpage.h> 17#include <linux/mpage.h>
18#include <linux/fs.h> 18#include <linux/fs.h>
19#include <linux/writeback.h>
19#include <linux/gfs2_ondisk.h> 20#include <linux/gfs2_ondisk.h>
20#include <linux/lm_interface.h> 21#include <linux/lm_interface.h>
21 22
@@ -157,6 +158,32 @@ out_ignore:
157} 158}
158 159
159/** 160/**
161 * gfs2_writepages - Write a bunch of dirty pages back to disk
162 * @mapping: The mapping to write
163 * @wbc: Write-back control
164 *
165 * For journaled files and/or ordered writes this just falls back to the
166 * kernel's default writepages path for now. We will probably want to change
167 * that eventually (i.e. when we look at allocate on flush).
168 *
169 * For the data=writeback case though we can already ignore buffer heads
170 * and write whole extents at once. This is a big reduction in the
171 * number of I/O requests we send and the bmap calls we make in this case.
172 */
173static int gfs2_writepages(struct address_space *mapping,
174 struct writeback_control *wbc)
175{
176 struct inode *inode = mapping->host;
177 struct gfs2_inode *ip = GFS2_I(inode);
178 struct gfs2_sbd *sdp = GFS2_SB(inode);
179
180 if (sdp->sd_args.ar_data == GFS2_DATA_WRITEBACK && !gfs2_is_jdata(ip))
181 return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);
182
183 return generic_writepages(mapping, wbc);
184}
185
186/**
160 * stuffed_readpage - Fill in a Linux page with stuffed file data 187 * stuffed_readpage - Fill in a Linux page with stuffed file data
161 * @ip: the inode 188 * @ip: the inode
162 * @page: the page 189 * @page: the page
@@ -256,7 +283,7 @@ out_unlock:
256 * the page lock and the glock) and return having done no I/O. Its 283 * the page lock and the glock) and return having done no I/O. Its
257 * obviously not something we'd want to do on too regular a basis. 284 * obviously not something we'd want to do on too regular a basis.
258 * Any I/O we ignore at this time will be done via readpage later. 285 * Any I/O we ignore at this time will be done via readpage later.
259 * 2. We have to handle stuffed files here too. 286 * 2. We don't handle stuffed files here we let readpage do the honours.
260 * 3. mpage_readpages() does most of the heavy lifting in the common case. 287 * 3. mpage_readpages() does most of the heavy lifting in the common case.
261 * 4. gfs2_get_block() is relied upon to set BH_Boundary in the right places. 288 * 4. gfs2_get_block() is relied upon to set BH_Boundary in the right places.
262 * 5. We use LM_FLAG_TRY_1CB here, effectively we then have lock-ahead as 289 * 5. We use LM_FLAG_TRY_1CB here, effectively we then have lock-ahead as
@@ -269,8 +296,7 @@ static int gfs2_readpages(struct file *file, struct address_space *mapping,
269 struct gfs2_inode *ip = GFS2_I(inode); 296 struct gfs2_inode *ip = GFS2_I(inode);
270 struct gfs2_sbd *sdp = GFS2_SB(inode); 297 struct gfs2_sbd *sdp = GFS2_SB(inode);
271 struct gfs2_holder gh; 298 struct gfs2_holder gh;
272 unsigned page_idx; 299 int ret = 0;
273 int ret;
274 int do_unlock = 0; 300 int do_unlock = 0;
275 301
276 if (likely(file != &gfs2_internal_file_sentinel)) { 302 if (likely(file != &gfs2_internal_file_sentinel)) {
@@ -289,29 +315,8 @@ static int gfs2_readpages(struct file *file, struct address_space *mapping,
289 goto out_unlock; 315 goto out_unlock;
290 } 316 }
291skip_lock: 317skip_lock:
292 if (gfs2_is_stuffed(ip)) { 318 if (!gfs2_is_stuffed(ip))
293 struct pagevec lru_pvec;
294 pagevec_init(&lru_pvec, 0);
295 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
296 struct page *page = list_entry(pages->prev, struct page, lru);
297 prefetchw(&page->flags);
298 list_del(&page->lru);
299 if (!add_to_page_cache(page, mapping,
300 page->index, GFP_KERNEL)) {
301 ret = stuffed_readpage(ip, page);
302 unlock_page(page);
303 if (!pagevec_add(&lru_pvec, page))
304 __pagevec_lru_add(&lru_pvec);
305 } else {
306 page_cache_release(page);
307 }
308 }
309 pagevec_lru_add(&lru_pvec);
310 ret = 0;
311 } else {
312 /* What we really want to do .... */
313 ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block); 319 ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block);
314 }
315 320
316 if (do_unlock) { 321 if (do_unlock) {
317 gfs2_glock_dq_m(1, &gh); 322 gfs2_glock_dq_m(1, &gh);
@@ -356,8 +361,10 @@ static int gfs2_prepare_write(struct file *file, struct page *page,
356 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|LM_FLAG_TRY_1CB, &ip->i_gh); 361 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|LM_FLAG_TRY_1CB, &ip->i_gh);
357 error = gfs2_glock_nq_atime(&ip->i_gh); 362 error = gfs2_glock_nq_atime(&ip->i_gh);
358 if (unlikely(error)) { 363 if (unlikely(error)) {
359 if (error == GLR_TRYFAILED) 364 if (error == GLR_TRYFAILED) {
365 unlock_page(page);
360 error = AOP_TRUNCATED_PAGE; 366 error = AOP_TRUNCATED_PAGE;
367 }
361 goto out_uninit; 368 goto out_uninit;
362 } 369 }
363 370
@@ -594,6 +601,36 @@ static void gfs2_invalidatepage(struct page *page, unsigned long offset)
594 return; 601 return;
595} 602}
596 603
604/**
605 * gfs2_ok_for_dio - check that dio is valid on this file
606 * @ip: The inode
607 * @rw: READ or WRITE
608 * @offset: The offset at which we are reading or writing
609 *
610 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
611 * 1 (to accept the i/o request)
612 */
613static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
614{
615 /*
616 * Should we return an error here? I can't see that O_DIRECT for
617 * a journaled file makes any sense. For now we'll silently fall
618 * back to buffered I/O, likewise we do the same for stuffed
619 * files since they are (a) small and (b) unaligned.
620 */
621 if (gfs2_is_jdata(ip))
622 return 0;
623
624 if (gfs2_is_stuffed(ip))
625 return 0;
626
627 if (offset > i_size_read(&ip->i_inode))
628 return 0;
629 return 1;
630}
631
632
633
597static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb, 634static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
598 const struct iovec *iov, loff_t offset, 635 const struct iovec *iov, loff_t offset,
599 unsigned long nr_segs) 636 unsigned long nr_segs)
@@ -604,42 +641,28 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
604 struct gfs2_holder gh; 641 struct gfs2_holder gh;
605 int rv; 642 int rv;
606 643
607 if (rw == READ)
608 mutex_lock(&inode->i_mutex);
609 /* 644 /*
610 * Shared lock, even if its a write, since we do no allocation 645 * Deferred lock, even if its a write, since we do no allocation
611 * on this path. All we need change is atime. 646 * on this path. All we need change is atime, and this lock mode
647 * ensures that other nodes have flushed their buffered read caches
648 * (i.e. their page cache entries for this inode). We do not,
649 * unfortunately have the option of only flushing a range like
650 * the VFS does.
612 */ 651 */
613 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh); 652 gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, GL_ATIME, &gh);
614 rv = gfs2_glock_nq_atime(&gh); 653 rv = gfs2_glock_nq_atime(&gh);
615 if (rv) 654 if (rv)
616 goto out; 655 return rv;
617 656 rv = gfs2_ok_for_dio(ip, rw, offset);
618 if (offset > i_size_read(inode)) 657 if (rv != 1)
619 goto out; 658 goto out; /* dio not valid, fall back to buffered i/o */
620 659
621 /* 660 rv = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev,
622 * Should we return an error here? I can't see that O_DIRECT for 661 iov, offset, nr_segs,
623 * a journaled file makes any sense. For now we'll silently fall 662 gfs2_get_block_direct, NULL);
624 * back to buffered I/O, likewise we do the same for stuffed
625 * files since they are (a) small and (b) unaligned.
626 */
627 if (gfs2_is_jdata(ip))
628 goto out;
629
630 if (gfs2_is_stuffed(ip))
631 goto out;
632
633 rv = blockdev_direct_IO_own_locking(rw, iocb, inode,
634 inode->i_sb->s_bdev,
635 iov, offset, nr_segs,
636 gfs2_get_block_direct, NULL);
637out: 663out:
638 gfs2_glock_dq_m(1, &gh); 664 gfs2_glock_dq_m(1, &gh);
639 gfs2_holder_uninit(&gh); 665 gfs2_holder_uninit(&gh);
640 if (rw == READ)
641 mutex_unlock(&inode->i_mutex);
642
643 return rv; 666 return rv;
644} 667}
645 668
@@ -763,6 +786,7 @@ out:
763 786
764const struct address_space_operations gfs2_file_aops = { 787const struct address_space_operations gfs2_file_aops = {
765 .writepage = gfs2_writepage, 788 .writepage = gfs2_writepage,
789 .writepages = gfs2_writepages,
766 .readpage = gfs2_readpage, 790 .readpage = gfs2_readpage,
767 .readpages = gfs2_readpages, 791 .readpages = gfs2_readpages,
768 .sync_page = block_sync_page, 792 .sync_page = block_sync_page,
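
Two decisions in the ops_address.c changes above are simple predicates: gfs2_writepages() only takes the mpage_writepages() path for data=writeback, non-jdata files, and gfs2_ok_for_dio() quietly declines direct I/O for journaled-data files, stuffed files, or offsets beyond EOF so the caller falls back to buffered I/O. The sketch below replays the gfs2_ok_for_dio() decision in user space; struct demo_inode and its flags are invented stand-ins for the real inode state.

/* Sketch of the decision gfs2_ok_for_dio() makes above: journaled-data
 * and stuffed (inline) files, or an offset past EOF, fall back silently
 * to buffered I/O; everything else may use direct I/O. */
#include <stdio.h>

struct demo_inode {
        int jdata;              /* journaled data file            */
        int stuffed;            /* data held inline in the dinode */
        long long i_size;
};

/* Returns 1 if direct I/O may proceed, 0 to fall back to buffered I/O. */
static int ok_for_dio(const struct demo_inode *ip, long long offset)
{
        if (ip->jdata)
                return 0;
        if (ip->stuffed)
                return 0;
        if (offset > ip->i_size)
                return 0;
        return 1;
}

static void try_dio(const struct demo_inode *ip, long long offset)
{
        if (ok_for_dio(ip, offset))
                printf("offset %lld: direct I/O\n", offset);
        else
                printf("offset %lld: buffered fallback\n", offset);
}

int main(void)
{
        struct demo_inode plain   = { .i_size = 1 << 20 };
        struct demo_inode stuffed = { .stuffed = 1, .i_size = 200 };

        try_dio(&plain, 4096);          /* direct I/O          */
        try_dio(&plain, 2 << 20);       /* past EOF: buffered  */
        try_dio(&stuffed, 0);           /* stuffed: buffered   */
        return 0;
}
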
diff --git a/fs/gfs2/ops_dentry.c b/fs/gfs2/ops_dentry.c
index d355899585d8..9187eb174b43 100644
--- a/fs/gfs2/ops_dentry.c
+++ b/fs/gfs2/ops_dentry.c
@@ -46,6 +46,7 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
46 struct gfs2_inum_host inum; 46 struct gfs2_inum_host inum;
47 unsigned int type; 47 unsigned int type;
48 int error; 48 int error;
49 int had_lock=0;
49 50
50 if (inode && is_bad_inode(inode)) 51 if (inode && is_bad_inode(inode))
51 goto invalid; 52 goto invalid;
@@ -53,9 +54,12 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
53 if (sdp->sd_args.ar_localcaching) 54 if (sdp->sd_args.ar_localcaching)
54 goto valid; 55 goto valid;
55 56
56 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh); 57 had_lock = gfs2_glock_is_locked_by_me(dip->i_gl);
57 if (error) 58 if (!had_lock) {
58 goto fail; 59 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
60 if (error)
61 goto fail;
62 }
59 63
60 error = gfs2_dir_search(parent->d_inode, &dentry->d_name, &inum, &type); 64 error = gfs2_dir_search(parent->d_inode, &dentry->d_name, &inum, &type);
61 switch (error) { 65 switch (error) {
@@ -82,13 +86,15 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
82 } 86 }
83 87
84valid_gunlock: 88valid_gunlock:
85 gfs2_glock_dq_uninit(&d_gh); 89 if (!had_lock)
90 gfs2_glock_dq_uninit(&d_gh);
86valid: 91valid:
87 dput(parent); 92 dput(parent);
88 return 1; 93 return 1;
89 94
90invalid_gunlock: 95invalid_gunlock:
91 gfs2_glock_dq_uninit(&d_gh); 96 if (!had_lock)
97 gfs2_glock_dq_uninit(&d_gh);
92invalid: 98invalid:
93 if (inode && S_ISDIR(inode->i_mode)) { 99 if (inode && S_ISDIR(inode->i_mode)) {
94 if (have_submounts(dentry)) 100 if (have_submounts(dentry))
diff --git a/fs/gfs2/ops_export.c b/fs/gfs2/ops_export.c
index b4e7b8775315..4855e8cca622 100644
--- a/fs/gfs2/ops_export.c
+++ b/fs/gfs2/ops_export.c
@@ -22,6 +22,7 @@
22#include "glock.h" 22#include "glock.h"
23#include "glops.h" 23#include "glops.h"
24#include "inode.h" 24#include "inode.h"
25#include "ops_dentry.h"
25#include "ops_export.h" 26#include "ops_export.h"
26#include "rgrp.h" 27#include "rgrp.h"
27#include "util.h" 28#include "util.h"
@@ -112,13 +113,12 @@ struct get_name_filldir {
112 char *name; 113 char *name;
113}; 114};
114 115
115static int get_name_filldir(void *opaque, const char *name, unsigned int length, 116static int get_name_filldir(void *opaque, const char *name, int length,
116 u64 offset, struct gfs2_inum_host *inum, 117 loff_t offset, u64 inum, unsigned int type)
117 unsigned int type)
118{ 118{
119 struct get_name_filldir *gnfd = (struct get_name_filldir *)opaque; 119 struct get_name_filldir *gnfd = opaque;
120 120
121 if (!gfs2_inum_equal(inum, &gnfd->inum)) 121 if (inum != gnfd->inum.no_addr)
122 return 0; 122 return 0;
123 123
124 memcpy(gnfd->name, name, length); 124 memcpy(gnfd->name, name, length);
@@ -189,6 +189,7 @@ static struct dentry *gfs2_get_parent(struct dentry *child)
189 return ERR_PTR(-ENOMEM); 189 return ERR_PTR(-ENOMEM);
190 } 190 }
191 191
192 dentry->d_op = &gfs2_dops;
192 return dentry; 193 return dentry;
193} 194}
194 195
@@ -215,8 +216,7 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb, void *inum_obj)
215 } 216 }
216 217
217 error = gfs2_glock_nq_num(sdp, inum->no_addr, &gfs2_inode_glops, 218 error = gfs2_glock_nq_num(sdp, inum->no_addr, &gfs2_inode_glops,
218 LM_ST_SHARED, LM_FLAG_ANY | GL_LOCAL_EXCL, 219 LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
219 &i_gh);
220 if (error) 220 if (error)
221 return ERR_PTR(error); 221 return ERR_PTR(error);
222 222
@@ -269,6 +269,7 @@ out_inode:
269 return ERR_PTR(-ENOMEM); 269 return ERR_PTR(-ENOMEM);
270 } 270 }
271 271
272 dentry->d_op = &gfs2_dops;
272 return dentry; 273 return dentry;
273 274
274fail_rgd: 275fail_rgd:
diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c
index faa07e4b97d0..c996aa739a05 100644
--- a/fs/gfs2/ops_file.c
+++ b/fs/gfs2/ops_file.c
@@ -43,15 +43,6 @@
43#include "util.h" 43#include "util.h"
44#include "eaops.h" 44#include "eaops.h"
45 45
46/* For regular, non-NFS */
47struct filldir_reg {
48 struct gfs2_sbd *fdr_sbd;
49 int fdr_prefetch;
50
51 filldir_t fdr_filldir;
52 void *fdr_opaque;
53};
54
55/* 46/*
56 * Most fields left uninitialised to catch anybody who tries to 47 * Most fields left uninitialised to catch anybody who tries to
57 * use them. f_flags set to prevent file_accessed() from touching 48 * use them. f_flags set to prevent file_accessed() from touching
@@ -128,41 +119,6 @@ static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
128} 119}
129 120
130/** 121/**
131 * filldir_func - Report a directory entry to the caller of gfs2_dir_read()
132 * @opaque: opaque data used by the function
133 * @name: the name of the directory entry
134 * @length: the length of the name
135 * @offset: the entry's offset in the directory
136 * @inum: the inode number the entry points to
137 * @type: the type of inode the entry points to
138 *
139 * Returns: 0 on success, 1 if buffer full
140 */
141
142static int filldir_func(void *opaque, const char *name, unsigned int length,
143 u64 offset, struct gfs2_inum_host *inum,
144 unsigned int type)
145{
146 struct filldir_reg *fdr = (struct filldir_reg *)opaque;
147 struct gfs2_sbd *sdp = fdr->fdr_sbd;
148 int error;
149
150 error = fdr->fdr_filldir(fdr->fdr_opaque, name, length, offset,
151 inum->no_addr, type);
152 if (error)
153 return 1;
154
155 if (fdr->fdr_prefetch && !(length == 1 && *name == '.')) {
156 gfs2_glock_prefetch_num(sdp, inum->no_addr, &gfs2_inode_glops,
157 LM_ST_SHARED, LM_FLAG_TRY | LM_FLAG_ANY);
158 gfs2_glock_prefetch_num(sdp, inum->no_addr, &gfs2_iopen_glops,
159 LM_ST_SHARED, LM_FLAG_TRY);
160 }
161
162 return 0;
163}
164
165/**
166 * gfs2_readdir - Read directory entries from a directory 122 * gfs2_readdir - Read directory entries from a directory
167 * @file: The directory to read from 123 * @file: The directory to read from
168 * @dirent: Buffer for dirents 124 * @dirent: Buffer for dirents
@@ -175,16 +131,10 @@ static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
175{ 131{
176 struct inode *dir = file->f_mapping->host; 132 struct inode *dir = file->f_mapping->host;
177 struct gfs2_inode *dip = GFS2_I(dir); 133 struct gfs2_inode *dip = GFS2_I(dir);
178 struct filldir_reg fdr;
179 struct gfs2_holder d_gh; 134 struct gfs2_holder d_gh;
180 u64 offset = file->f_pos; 135 u64 offset = file->f_pos;
181 int error; 136 int error;
182 137
183 fdr.fdr_sbd = GFS2_SB(dir);
184 fdr.fdr_prefetch = 1;
185 fdr.fdr_filldir = filldir;
186 fdr.fdr_opaque = dirent;
187
188 gfs2_holder_init(dip->i_gl, LM_ST_SHARED, GL_ATIME, &d_gh); 138 gfs2_holder_init(dip->i_gl, LM_ST_SHARED, GL_ATIME, &d_gh);
189 error = gfs2_glock_nq_atime(&d_gh); 139 error = gfs2_glock_nq_atime(&d_gh);
190 if (error) { 140 if (error) {
@@ -192,7 +142,7 @@ static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
192 return error; 142 return error;
193 } 143 }
194 144
195 error = gfs2_dir_read(dir, &offset, &fdr, filldir_func); 145 error = gfs2_dir_read(dir, &offset, dirent, filldir);
196 146
197 gfs2_glock_dq_uninit(&d_gh); 147 gfs2_glock_dq_uninit(&d_gh);
198 148
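
With the filldir_reg wrapper and its prefetch calls gone, gfs2_readdir() above now hands the VFS callback and its opaque buffer straight to gfs2_dir_read(), which is also why the callback signature in the ops_export.c hunk earlier changed to the standard filldir_t shape. The sketch below shows that pass-through shape with a simplified callback type; dir_read(), count_filldir() and the entry list are invented for the example.

/* Sketch of the readdir plumbing after the ops_file.c change above:
 * the opaque buffer and the callback are handed straight through to
 * the directory reader instead of being wrapped in a private struct.
 * Signatures are simplified; names are invented. */
#include <stdio.h>
#include <string.h>

typedef int (*filldir_t)(void *opaque, const char *name, int len,
                         long long offset, unsigned long long ino,
                         unsigned int type);

/* Stand-in for gfs2_dir_read(): walk some entries, call filldir for
 * each, and stop early if the callback reports a full buffer. */
static int dir_read(void *opaque, filldir_t filldir)
{
        static const char *names[] = { ".", "..", "journal0", "quota" };
        for (int i = 0; i < 4; i++)
                if (filldir(opaque, names[i], strlen(names[i]),
                            i, 100 + i, 0))
                        return 0;       /* buffer full, not an error */
        return 0;
}

/* The "VFS" callback: here it just counts entries into *opaque. */
static int count_filldir(void *opaque, const char *name, int len,
                         long long offset, unsigned long long ino,
                         unsigned int type)
{
        (void)offset; (void)type;
        printf("entry %.*s -> ino %llu\n", len, name, ino);
        (*(int *)opaque)++;
        return 0;
}

int main(void)
{
        int count = 0;
        dir_read(&count, count_filldir);
        printf("%d entries\n", count);
        return 0;
}
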
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index 636dda4c7d38..60f47bf2e8e8 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -264,13 +264,23 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
264 struct gfs2_inode *dip = GFS2_I(dir); 264 struct gfs2_inode *dip = GFS2_I(dir);
265 struct gfs2_sbd *sdp = GFS2_SB(dir); 265 struct gfs2_sbd *sdp = GFS2_SB(dir);
266 struct gfs2_inode *ip = GFS2_I(dentry->d_inode); 266 struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
267 struct gfs2_holder ghs[2]; 267 struct gfs2_holder ghs[3];
268 struct gfs2_rgrpd *rgd;
269 struct gfs2_holder ri_gh;
268 int error; 270 int error;
269 271
272 error = gfs2_rindex_hold(sdp, &ri_gh);
273 if (error)
274 return error;
275
270 gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs); 276 gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
271 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1); 277 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);
272 278
273 error = gfs2_glock_nq_m(2, ghs); 279 rgd = gfs2_blk2rgrpd(sdp, ip->i_num.no_addr);
280 gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2);
281
282
283 error = gfs2_glock_nq_m(3, ghs);
274 if (error) 284 if (error)
275 goto out; 285 goto out;
276 286
@@ -291,10 +301,12 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
291out_end_trans: 301out_end_trans:
292 gfs2_trans_end(sdp); 302 gfs2_trans_end(sdp);
293out_gunlock: 303out_gunlock:
294 gfs2_glock_dq_m(2, ghs); 304 gfs2_glock_dq_m(3, ghs);
295out: 305out:
296 gfs2_holder_uninit(ghs); 306 gfs2_holder_uninit(ghs);
297 gfs2_holder_uninit(ghs + 1); 307 gfs2_holder_uninit(ghs + 1);
308 gfs2_holder_uninit(ghs + 2);
309 gfs2_glock_dq_uninit(&ri_gh);
298 return error; 310 return error;
299} 311}
300 312
@@ -449,13 +461,22 @@ static int gfs2_rmdir(struct inode *dir, struct dentry *dentry)
449 struct gfs2_inode *dip = GFS2_I(dir); 461 struct gfs2_inode *dip = GFS2_I(dir);
450 struct gfs2_sbd *sdp = GFS2_SB(dir); 462 struct gfs2_sbd *sdp = GFS2_SB(dir);
451 struct gfs2_inode *ip = GFS2_I(dentry->d_inode); 463 struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
452 struct gfs2_holder ghs[2]; 464 struct gfs2_holder ghs[3];
465 struct gfs2_rgrpd *rgd;
466 struct gfs2_holder ri_gh;
453 int error; 467 int error;
454 468
469
470 error = gfs2_rindex_hold(sdp, &ri_gh);
471 if (error)
472 return error;
455 gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs); 473 gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
456 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1); 474 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);
457 475
458 error = gfs2_glock_nq_m(2, ghs); 476 rgd = gfs2_blk2rgrpd(sdp, ip->i_num.no_addr);
477 gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2);
478
479 error = gfs2_glock_nq_m(3, ghs);
459 if (error) 480 if (error)
460 goto out; 481 goto out;
461 482
@@ -483,10 +504,12 @@ static int gfs2_rmdir(struct inode *dir, struct dentry *dentry)
483 gfs2_trans_end(sdp); 504 gfs2_trans_end(sdp);
484 505
485out_gunlock: 506out_gunlock:
486 gfs2_glock_dq_m(2, ghs); 507 gfs2_glock_dq_m(3, ghs);
487out: 508out:
488 gfs2_holder_uninit(ghs); 509 gfs2_holder_uninit(ghs);
489 gfs2_holder_uninit(ghs + 1); 510 gfs2_holder_uninit(ghs + 1);
511 gfs2_holder_uninit(ghs + 2);
512 gfs2_glock_dq_uninit(&ri_gh);
490 return error; 513 return error;
491} 514}
492 515
@@ -547,7 +570,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
547 struct gfs2_inode *ip = GFS2_I(odentry->d_inode); 570 struct gfs2_inode *ip = GFS2_I(odentry->d_inode);
548 struct gfs2_inode *nip = NULL; 571 struct gfs2_inode *nip = NULL;
549 struct gfs2_sbd *sdp = GFS2_SB(odir); 572 struct gfs2_sbd *sdp = GFS2_SB(odir);
550 struct gfs2_holder ghs[4], r_gh; 573 struct gfs2_holder ghs[5], r_gh;
574 struct gfs2_rgrpd *nrgd;
551 unsigned int num_gh; 575 unsigned int num_gh;
552 int dir_rename = 0; 576 int dir_rename = 0;
553 int alloc_required; 577 int alloc_required;
@@ -587,6 +611,13 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
587 if (nip) { 611 if (nip) {
588 gfs2_holder_init(nip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh); 612 gfs2_holder_init(nip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh);
589 num_gh++; 613 num_gh++;
614 /* grab the resource lock for unlink flag twiddling
615 * this is the case of the target file already existing
616 * so we unlink before doing the rename
617 */
618 nrgd = gfs2_blk2rgrpd(sdp, nip->i_num.no_addr);
619 if (nrgd)
620 gfs2_holder_init(nrgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh++);
590 } 621 }
591 622
592 error = gfs2_glock_nq_m(num_gh, ghs); 623 error = gfs2_glock_nq_m(num_gh, ghs);
@@ -684,12 +715,12 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
684 error = gfs2_trans_begin(sdp, sdp->sd_max_dirres + 715 error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
685 al->al_rgd->rd_ri.ri_length + 716 al->al_rgd->rd_ri.ri_length +
686 4 * RES_DINODE + 4 * RES_LEAF + 717 4 * RES_DINODE + 4 * RES_LEAF +
687 RES_STATFS + RES_QUOTA, 0); 718 RES_STATFS + RES_QUOTA + 4, 0);
688 if (error) 719 if (error)
689 goto out_ipreserv; 720 goto out_ipreserv;
690 } else { 721 } else {
691 error = gfs2_trans_begin(sdp, 4 * RES_DINODE + 722 error = gfs2_trans_begin(sdp, 4 * RES_DINODE +
692 5 * RES_LEAF, 0); 723 5 * RES_LEAF + 4, 0);
693 if (error) 724 if (error)
694 goto out_gunlock; 725 goto out_gunlock;
695 } 726 }
@@ -728,7 +759,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
728 error = gfs2_meta_inode_buffer(ip, &dibh); 759 error = gfs2_meta_inode_buffer(ip, &dibh);
729 if (error) 760 if (error)
730 goto out_end_trans; 761 goto out_end_trans;
731 ip->i_inode.i_ctime.tv_sec = get_seconds(); 762 ip->i_inode.i_ctime = CURRENT_TIME_SEC;
732 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 763 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
733 gfs2_dinode_out(ip, dibh->b_data); 764 gfs2_dinode_out(ip, dibh->b_data);
734 brelse(dibh); 765 brelse(dibh);
@@ -1018,7 +1049,7 @@ static int gfs2_getattr(struct vfsmount *mnt, struct dentry *dentry,
1018 } 1049 }
1019 1050
1020 generic_fillattr(inode, stat); 1051 generic_fillattr(inode, stat);
1021 if (unlock); 1052 if (unlock)
1022 gfs2_glock_dq_uninit(&gh); 1053 gfs2_glock_dq_uninit(&gh);
1023 1054
1024 return 0; 1055 return 0;
@@ -1084,7 +1115,7 @@ static int gfs2_removexattr(struct dentry *dentry, const char *name)
1084 return gfs2_ea_remove(GFS2_I(dentry->d_inode), &er); 1115 return gfs2_ea_remove(GFS2_I(dentry->d_inode), &er);
1085} 1116}
1086 1117
1087struct inode_operations gfs2_file_iops = { 1118const struct inode_operations gfs2_file_iops = {
1088 .permission = gfs2_permission, 1119 .permission = gfs2_permission,
1089 .setattr = gfs2_setattr, 1120 .setattr = gfs2_setattr,
1090 .getattr = gfs2_getattr, 1121 .getattr = gfs2_getattr,
@@ -1094,7 +1125,7 @@ struct inode_operations gfs2_file_iops = {
1094 .removexattr = gfs2_removexattr, 1125 .removexattr = gfs2_removexattr,
1095}; 1126};
1096 1127
1097struct inode_operations gfs2_dev_iops = { 1128const struct inode_operations gfs2_dev_iops = {
1098 .permission = gfs2_permission, 1129 .permission = gfs2_permission,
1099 .setattr = gfs2_setattr, 1130 .setattr = gfs2_setattr,
1100 .getattr = gfs2_getattr, 1131 .getattr = gfs2_getattr,
@@ -1104,7 +1135,7 @@ struct inode_operations gfs2_dev_iops = {
1104 .removexattr = gfs2_removexattr, 1135 .removexattr = gfs2_removexattr,
1105}; 1136};
1106 1137
1107struct inode_operations gfs2_dir_iops = { 1138const struct inode_operations gfs2_dir_iops = {
1108 .create = gfs2_create, 1139 .create = gfs2_create,
1109 .lookup = gfs2_lookup, 1140 .lookup = gfs2_lookup,
1110 .link = gfs2_link, 1141 .link = gfs2_link,
@@ -1123,7 +1154,7 @@ struct inode_operations gfs2_dir_iops = {
1123 .removexattr = gfs2_removexattr, 1154 .removexattr = gfs2_removexattr,
1124}; 1155};
1125 1156
1126struct inode_operations gfs2_symlink_iops = { 1157const struct inode_operations gfs2_symlink_iops = {
1127 .readlink = gfs2_readlink, 1158 .readlink = gfs2_readlink,
1128 .follow_link = gfs2_follow_link, 1159 .follow_link = gfs2_follow_link,
1129 .permission = gfs2_permission, 1160 .permission = gfs2_permission,
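
The unlink and rmdir paths above grow their holder array from two to three so the resource-group glock is held alongside the directory and inode glocks, mirroring the removal of that locking from gfs2_change_nlink() in the inode.c hunk earlier; all three are then taken and dropped together via gfs2_glock_nq_m(3, ghs)/gfs2_glock_dq_m(3, ghs). The sketch below shows only the "array of locks acquired and released as a unit" shape, with plain mutexes in a fixed order; the real nq_m/dq_m handle lock ordering and partial failure, which this toy version ignores, and every name in it is invented.

/* Sketch of the "array of holders" pattern the unlink/rmdir hunks above
 * extend from two to three locks (directory, inode, resource group). */
#include <pthread.h>
#include <stdio.h>

struct holder {
        pthread_mutex_t *lock;
};

/* Acquire all locks in the array, in array order. */
static int nq_m(unsigned int num, struct holder *ghs)
{
        for (unsigned int i = 0; i < num; i++)
                pthread_mutex_lock(ghs[i].lock);
        return 0;
}

/* Release them in reverse order. */
static void dq_m(unsigned int num, struct holder *ghs)
{
        while (num--)
                pthread_mutex_unlock(ghs[num].lock);
}

static pthread_mutex_t dir_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t ino_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t rgrp_lock = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
        struct holder ghs[3] = {
                { &dir_lock },   /* directory                               */
                { &ino_lock },   /* inode being removed                     */
                { &rgrp_lock },  /* resource group, the new third holder    */
        };

        if (nq_m(3, ghs) == 0) {
                puts("all three locks held: do the unlink work here");
                dq_m(3, ghs);
        }
        return 0;
}
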
diff --git a/fs/gfs2/ops_inode.h b/fs/gfs2/ops_inode.h
index b15acb4fd34c..34f0caac1a03 100644
--- a/fs/gfs2/ops_inode.h
+++ b/fs/gfs2/ops_inode.h
@@ -12,9 +12,9 @@
12 12
13#include <linux/fs.h> 13#include <linux/fs.h>
14 14
15extern struct inode_operations gfs2_file_iops; 15extern const struct inode_operations gfs2_file_iops;
16extern struct inode_operations gfs2_dir_iops; 16extern const struct inode_operations gfs2_dir_iops;
17extern struct inode_operations gfs2_symlink_iops; 17extern const struct inode_operations gfs2_symlink_iops;
18extern struct inode_operations gfs2_dev_iops; 18extern const struct inode_operations gfs2_dev_iops;
19 19
20#endif /* __OPS_INODE_DOT_H__ */ 20#endif /* __OPS_INODE_DOT_H__ */
diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c
index 7685b46f934b..b89999d3a767 100644
--- a/fs/gfs2/ops_super.c
+++ b/fs/gfs2/ops_super.c
@@ -173,6 +173,9 @@ static void gfs2_write_super_lockfs(struct super_block *sb)
173 struct gfs2_sbd *sdp = sb->s_fs_info; 173 struct gfs2_sbd *sdp = sb->s_fs_info;
174 int error; 174 int error;
175 175
176 if (test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
177 return;
178
176 for (;;) { 179 for (;;) {
177 error = gfs2_freeze_fs(sdp); 180 error = gfs2_freeze_fs(sdp);
178 if (!error) 181 if (!error)
@@ -426,6 +429,12 @@ static void gfs2_delete_inode(struct inode *inode)
426 } 429 }
427 430
428 error = gfs2_dinode_dealloc(ip); 431 error = gfs2_dinode_dealloc(ip);
432 /*
433 * Must do this before unlock to avoid trying to write back
434 * potentially dirty data now that inode no longer exists
435 * on disk.
436 */
437 truncate_inode_pages(&inode->i_data, 0);
429 438
430out_unlock: 439out_unlock:
431 gfs2_glock_dq(&ip->i_iopen_gh); 440 gfs2_glock_dq(&ip->i_iopen_gh);
@@ -443,14 +452,12 @@ out:
443 452
444static struct inode *gfs2_alloc_inode(struct super_block *sb) 453static struct inode *gfs2_alloc_inode(struct super_block *sb)
445{ 454{
446 struct gfs2_sbd *sdp = sb->s_fs_info;
447 struct gfs2_inode *ip; 455 struct gfs2_inode *ip;
448 456
449 ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL); 457 ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
450 if (ip) { 458 if (ip) {
451 ip->i_flags = 0; 459 ip->i_flags = 0;
452 ip->i_gl = NULL; 460 ip->i_gl = NULL;
453 ip->i_greedy = gfs2_tune_get(sdp, gt_greedy_default);
454 ip->i_last_pfault = jiffies; 461 ip->i_last_pfault = jiffies;
455 } 462 }
456 return &ip->i_inode; 463 return &ip->i_inode;
@@ -461,7 +468,7 @@ static void gfs2_destroy_inode(struct inode *inode)
461 kmem_cache_free(gfs2_inode_cachep, inode); 468 kmem_cache_free(gfs2_inode_cachep, inode);
462} 469}
463 470
464struct super_operations gfs2_super_ops = { 471const struct super_operations gfs2_super_ops = {
465 .alloc_inode = gfs2_alloc_inode, 472 .alloc_inode = gfs2_alloc_inode,
466 .destroy_inode = gfs2_destroy_inode, 473 .destroy_inode = gfs2_destroy_inode,
467 .write_inode = gfs2_write_inode, 474 .write_inode = gfs2_write_inode,
diff --git a/fs/gfs2/ops_super.h b/fs/gfs2/ops_super.h
index 9de73f042f78..442a274c6272 100644
--- a/fs/gfs2/ops_super.h
+++ b/fs/gfs2/ops_super.h
@@ -12,6 +12,6 @@
12 12
13#include <linux/fs.h> 13#include <linux/fs.h>
14 14
15extern struct super_operations gfs2_super_ops; 15extern const struct super_operations gfs2_super_ops;
16 16
17#endif /* __OPS_SUPER_DOT_H__ */ 17#endif /* __OPS_SUPER_DOT_H__ */
diff --git a/fs/gfs2/ops_vm.c b/fs/gfs2/ops_vm.c
index 45a5f11fc39a..14b380fb0602 100644
--- a/fs/gfs2/ops_vm.c
+++ b/fs/gfs2/ops_vm.c
@@ -28,34 +28,13 @@
28#include "trans.h" 28#include "trans.h"
29#include "util.h" 29#include "util.h"
30 30
31static void pfault_be_greedy(struct gfs2_inode *ip)
32{
33 unsigned int time;
34
35 spin_lock(&ip->i_spin);
36 time = ip->i_greedy;
37 ip->i_last_pfault = jiffies;
38 spin_unlock(&ip->i_spin);
39
40 igrab(&ip->i_inode);
41 if (gfs2_glock_be_greedy(ip->i_gl, time))
42 iput(&ip->i_inode);
43}
44
45static struct page *gfs2_private_nopage(struct vm_area_struct *area, 31static struct page *gfs2_private_nopage(struct vm_area_struct *area,
46 unsigned long address, int *type) 32 unsigned long address, int *type)
47{ 33{
48 struct gfs2_inode *ip = GFS2_I(area->vm_file->f_mapping->host); 34 struct gfs2_inode *ip = GFS2_I(area->vm_file->f_mapping->host);
49 struct page *result;
50 35
51 set_bit(GIF_PAGED, &ip->i_flags); 36 set_bit(GIF_PAGED, &ip->i_flags);
52 37 return filemap_nopage(area, address, type);
53 result = filemap_nopage(area, address, type);
54
55 if (result && result != NOPAGE_OOM)
56 pfault_be_greedy(ip);
57
58 return result;
59} 38}
60 39
61static int alloc_page_backing(struct gfs2_inode *ip, struct page *page) 40static int alloc_page_backing(struct gfs2_inode *ip, struct page *page)
@@ -167,7 +146,6 @@ static struct page *gfs2_sharewrite_nopage(struct vm_area_struct *area,
167 set_page_dirty(result); 146 set_page_dirty(result);
168 } 147 }
169 148
170 pfault_be_greedy(ip);
171out: 149out:
172 gfs2_glock_dq_uninit(&i_gh); 150 gfs2_glock_dq_uninit(&i_gh);
173 151
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 43a24f2e5905..70f424fcf1cd 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -71,17 +71,12 @@ void gfs2_tune_init(struct gfs2_tune *gt)
71 gt->gt_atime_quantum = 3600; 71 gt->gt_atime_quantum = 3600;
72 gt->gt_new_files_jdata = 0; 72 gt->gt_new_files_jdata = 0;
73 gt->gt_new_files_directio = 0; 73 gt->gt_new_files_directio = 0;
74 gt->gt_max_atomic_write = 4 << 20;
75 gt->gt_max_readahead = 1 << 18; 74 gt->gt_max_readahead = 1 << 18;
76 gt->gt_lockdump_size = 131072; 75 gt->gt_lockdump_size = 131072;
77 gt->gt_stall_secs = 600; 76 gt->gt_stall_secs = 600;
78 gt->gt_complain_secs = 10; 77 gt->gt_complain_secs = 10;
79 gt->gt_reclaim_limit = 5000; 78 gt->gt_reclaim_limit = 5000;
80 gt->gt_entries_per_readdir = 32; 79 gt->gt_entries_per_readdir = 32;
81 gt->gt_prefetch_secs = 10;
82 gt->gt_greedy_default = HZ / 10;
83 gt->gt_greedy_quantum = HZ / 40;
84 gt->gt_greedy_max = HZ / 4;
85 gt->gt_statfs_quantum = 30; 80 gt->gt_statfs_quantum = 30;
86 gt->gt_statfs_slow = 0; 81 gt->gt_statfs_slow = 0;
87} 82}
@@ -359,8 +354,7 @@ int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
359 mutex_lock(&sdp->sd_jindex_mutex); 354 mutex_lock(&sdp->sd_jindex_mutex);
360 355
361 for (;;) { 356 for (;;) {
362 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 357 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh);
363 GL_LOCAL_EXCL, ji_gh);
364 if (error) 358 if (error)
365 break; 359 break;
366 360
@@ -529,8 +523,7 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
529 struct gfs2_log_header_host head; 523 struct gfs2_log_header_host head;
530 int error; 524 int error;
531 525
532 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, 526 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &t_gh);
533 GL_LOCAL_EXCL, &t_gh);
534 if (error) 527 if (error)
535 return error; 528 return error;
536 529
@@ -583,9 +576,8 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
583 gfs2_quota_sync(sdp); 576 gfs2_quota_sync(sdp);
584 gfs2_statfs_sync(sdp); 577 gfs2_statfs_sync(sdp);
585 578
586 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, 579 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE,
587 GL_LOCAL_EXCL | GL_NOCACHE, 580 &t_gh);
588 &t_gh);
589 if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) 581 if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
590 return error; 582 return error;
591 583
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 983eaf1e06be..d01f9f0fda26 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -436,17 +436,12 @@ TUNE_ATTR(atime_quantum, 0);
436TUNE_ATTR(max_readahead, 0); 436TUNE_ATTR(max_readahead, 0);
437TUNE_ATTR(complain_secs, 0); 437TUNE_ATTR(complain_secs, 0);
438TUNE_ATTR(reclaim_limit, 0); 438TUNE_ATTR(reclaim_limit, 0);
439TUNE_ATTR(prefetch_secs, 0);
440TUNE_ATTR(statfs_slow, 0); 439TUNE_ATTR(statfs_slow, 0);
441TUNE_ATTR(new_files_jdata, 0); 440TUNE_ATTR(new_files_jdata, 0);
442TUNE_ATTR(new_files_directio, 0); 441TUNE_ATTR(new_files_directio, 0);
443TUNE_ATTR(quota_simul_sync, 1); 442TUNE_ATTR(quota_simul_sync, 1);
444TUNE_ATTR(quota_cache_secs, 1); 443TUNE_ATTR(quota_cache_secs, 1);
445TUNE_ATTR(max_atomic_write, 1);
446TUNE_ATTR(stall_secs, 1); 444TUNE_ATTR(stall_secs, 1);
447TUNE_ATTR(greedy_default, 1);
448TUNE_ATTR(greedy_quantum, 1);
449TUNE_ATTR(greedy_max, 1);
450TUNE_ATTR(statfs_quantum, 1); 445TUNE_ATTR(statfs_quantum, 1);
451TUNE_ATTR_DAEMON(scand_secs, scand_process); 446TUNE_ATTR_DAEMON(scand_secs, scand_process);
452TUNE_ATTR_DAEMON(recoverd_secs, recoverd_process); 447TUNE_ATTR_DAEMON(recoverd_secs, recoverd_process);
@@ -465,15 +460,10 @@ static struct attribute *tune_attrs[] = {
465 &tune_attr_max_readahead.attr, 460 &tune_attr_max_readahead.attr,
466 &tune_attr_complain_secs.attr, 461 &tune_attr_complain_secs.attr,
467 &tune_attr_reclaim_limit.attr, 462 &tune_attr_reclaim_limit.attr,
468 &tune_attr_prefetch_secs.attr,
469 &tune_attr_statfs_slow.attr, 463 &tune_attr_statfs_slow.attr,
470 &tune_attr_quota_simul_sync.attr, 464 &tune_attr_quota_simul_sync.attr,
471 &tune_attr_quota_cache_secs.attr, 465 &tune_attr_quota_cache_secs.attr,
472 &tune_attr_max_atomic_write.attr,
473 &tune_attr_stall_secs.attr, 466 &tune_attr_stall_secs.attr,
474 &tune_attr_greedy_default.attr,
475 &tune_attr_greedy_quantum.attr,
476 &tune_attr_greedy_max.attr,
477 &tune_attr_statfs_quantum.attr, 467 &tune_attr_statfs_quantum.attr,
478 &tune_attr_scand_secs.attr, 468 &tune_attr_scand_secs.attr,
479 &tune_attr_recoverd_secs.attr, 469 &tune_attr_recoverd_secs.attr,