path: root/fs/gfs2
Diffstat (limited to 'fs/gfs2')
-rw-r--r--  fs/gfs2/acl.c         |   2
-rw-r--r--  fs/gfs2/aops.c        |  17
-rw-r--r--  fs/gfs2/bmap.c        |  32
-rw-r--r--  fs/gfs2/dir.c         |  32
-rw-r--r--  fs/gfs2/file.c        |   6
-rw-r--r--  fs/gfs2/glock.c       | 116
-rw-r--r--  fs/gfs2/glops.c       |   4
-rw-r--r--  fs/gfs2/incore.h      |  11
-rw-r--r--  fs/gfs2/inode.c       |  40
-rw-r--r--  fs/gfs2/lock_dlm.c    |   7
-rw-r--r--  fs/gfs2/log.c         |  76
-rw-r--r--  fs/gfs2/log.h         |  12
-rw-r--r--  fs/gfs2/lops.c        |  83
-rw-r--r--  fs/gfs2/lops.h        |  14
-rw-r--r--  fs/gfs2/meta_io.c     |  35
-rw-r--r--  fs/gfs2/meta_io.h     |   3
-rw-r--r--  fs/gfs2/ops_fstype.c  |   4
-rw-r--r--  fs/gfs2/quota.c       | 142
-rw-r--r--  fs/gfs2/quota.h       |  15
-rw-r--r--  fs/gfs2/rgrp.c        |  18
-rw-r--r--  fs/gfs2/super.c       |  76
-rw-r--r--  fs/gfs2/super.h       |   3
-rw-r--r--  fs/gfs2/sys.c         |  62
-rw-r--r--  fs/gfs2/trans.c       | 124
-rw-r--r--  fs/gfs2/trans.h       |   3
-rw-r--r--  fs/gfs2/util.c        |   3
-rw-r--r--  fs/gfs2/xattr.c       |  40
27 files changed, 486 insertions(+), 494 deletions(-)
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index f850020ad906..f69ac0af5496 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -237,7 +237,7 @@ static int gfs2_xattr_system_set(struct dentry *dentry, const char *name,
 		return -EINVAL;
 	if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
 		return value ? -EACCES : 0;
-	if ((current_fsuid() != inode->i_uid) && !capable(CAP_FOWNER))
+	if (!uid_eq(current_fsuid(), inode->i_uid) && !capable(CAP_FOWNER))
 		return -EPERM;
 	if (S_ISLNK(inode->i_mode))
 		return -EOPNOTSUPP;
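The change from `current_fsuid() != inode->i_uid` to `!uid_eq(...)` is part of the user-namespace conversion: i_uid is now a kernel-internal kuid_t rather than a bare integer, so it can only be compared through helpers. The same uid_eq()/kuid_t pattern recurs in glops.c, inode.c and quota.c below. A minimal userspace sketch of the idea (the xuid_t type and helpers here are invented for illustration, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

/* Wrap the raw id in a struct so accidental "a == b" comparisons against
 * unconverted integers become compile errors; all comparisons go through
 * one helper. */
typedef struct { unsigned int val; } xuid_t;

static inline xuid_t make_xuid(unsigned int v) { xuid_t u = { v }; return u; }
static inline bool xuid_eq(xuid_t a, xuid_t b) { return a.val == b.val; }

int main(void)
{
	xuid_t fsuid = make_xuid(1000);
	xuid_t owner = make_xuid(0);

	/* "fsuid == owner" would not compile; the helper is the only way in. */
	if (!xuid_eq(fsuid, owner))
		printf("not the owner, would need CAP_FOWNER\n");
	return 0;
}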
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 30de4f2a2ea9..24f414f0ce61 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -51,7 +51,7 @@ static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
 			continue;
 		if (gfs2_is_jdata(ip))
 			set_buffer_uptodate(bh);
-		gfs2_trans_add_bh(ip->i_gl, bh, 0);
+		gfs2_trans_add_data(ip->i_gl, bh);
 	}
 }
 
@@ -230,16 +230,14 @@ out_ignore:
 }
 
 /**
- * gfs2_writeback_writepages - Write a bunch of dirty pages back to disk
+ * gfs2_writepages - Write a bunch of dirty pages back to disk
  * @mapping: The mapping to write
  * @wbc: Write-back control
  *
- * For the data=writeback case we can already ignore buffer heads
- * and write whole extents at once. This is a big reduction in the
- * number of I/O requests we send and the bmap calls we make in this case.
+ * Used for both ordered and writeback modes.
  */
-static int gfs2_writeback_writepages(struct address_space *mapping,
+static int gfs2_writepages(struct address_space *mapping,
 			    struct writeback_control *wbc)
 {
 	return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);
 }
@@ -852,7 +850,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
 		goto failed;
 	}
 
-	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+	gfs2_trans_add_meta(ip->i_gl, dibh);
 
 	if (gfs2_is_stuffed(ip))
 		return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);
@@ -1102,7 +1100,7 @@ cannot_release:
 
 static const struct address_space_operations gfs2_writeback_aops = {
 	.writepage = gfs2_writeback_writepage,
-	.writepages = gfs2_writeback_writepages,
+	.writepages = gfs2_writepages,
 	.readpage = gfs2_readpage,
 	.readpages = gfs2_readpages,
 	.write_begin = gfs2_write_begin,
@@ -1118,6 +1116,7 @@ static const struct address_space_operations gfs2_writeback_aops = {
 
 static const struct address_space_operations gfs2_ordered_aops = {
 	.writepage = gfs2_ordered_writepage,
+	.writepages = gfs2_writepages,
 	.readpage = gfs2_readpage,
 	.readpages = gfs2_readpages,
 	.write_begin = gfs2_write_begin,
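Throughout this patch, gfs2_trans_add_bh(gl, bh, 0) and gfs2_trans_add_bh(gl, bh, 1) become gfs2_trans_add_data() and gfs2_trans_add_meta(); their bodies live in trans.c, whose hunks are outside this excerpt. The refactor replaces a boolean "is this metadata?" argument with two self-describing entry points. A compilable userspace sketch of that shape, with invented names:

#include <stdio.h>

struct buf { long blocknr; };

/* Shared bookkeeping both paths need. */
static void attach_bookkeeping(struct buf *b)
{
	printf("attach bookkeeping to block %ld\n", b->blocknr);
}

/* Before: one entry point whose behaviour was switched by a bare 0/1 flag.
 * After: two entry points whose names say what the caller means. */
static void trans_add_data(struct buf *b)
{
	attach_bookkeeping(b);
	printf("queue %ld for ordered/journaled data handling\n", b->blocknr);
}

static void trans_add_meta(struct buf *b)
{
	attach_bookkeeping(b);
	printf("pin %ld into the metadata log\n", b->blocknr);
}

int main(void)
{
	struct buf dibh = { 1234 }, datab = { 5678 };

	trans_add_meta(&dibh);   /* was: gfs2_trans_add_bh(gl, dibh, 1) */
	trans_add_data(&datab);  /* was: gfs2_trans_add_bh(gl, bh, 0)   */
	return 0;
}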
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index a68e91bcef3d..5e83657f046e 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -22,6 +22,7 @@
 #include "meta_io.h"
 #include "quota.h"
 #include "rgrp.h"
+#include "log.h"
 #include "super.h"
 #include "trans.h"
 #include "dir.h"
@@ -93,7 +94,7 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
 	if (!gfs2_is_jdata(ip))
 		mark_buffer_dirty(bh);
 	if (!gfs2_is_writeback(ip))
-		gfs2_trans_add_bh(ip->i_gl, bh, 0);
+		gfs2_trans_add_data(ip->i_gl, bh);
 
 	if (release) {
 		unlock_page(page);
@@ -153,7 +154,7 @@ int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
 
 	/* Set up the pointer to the new block */
 
-	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+	gfs2_trans_add_meta(ip->i_gl, dibh);
 	di = (struct gfs2_dinode *)dibh->b_data;
 	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
 
@@ -405,7 +406,7 @@ static inline __be64 *gfs2_indirect_init(struct metapath *mp,
 	BUG_ON(i < 1);
 	BUG_ON(mp->mp_bh[i] != NULL);
 	mp->mp_bh[i] = gfs2_meta_new(gl, bn);
-	gfs2_trans_add_bh(gl, mp->mp_bh[i], 1);
+	gfs2_trans_add_meta(gl, mp->mp_bh[i]);
 	gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
 	gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
 	ptr += offset;
@@ -468,7 +469,7 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
 	BUG_ON(sheight < 1);
 	BUG_ON(dibh == NULL);
 
-	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+	gfs2_trans_add_meta(ip->i_gl, dibh);
 
 	if (height == sheight) {
 		struct buffer_head *bh;
@@ -544,7 +545,7 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
 	/* Branching from existing tree */
 	case ALLOC_GROW_DEPTH:
 		if (i > 1 && i < height)
-			gfs2_trans_add_bh(ip->i_gl, mp->mp_bh[i-1], 1);
+			gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
 		for (; i < height && n > 0; i++, n--)
 			gfs2_indirect_init(mp, ip->i_gl, i,
 					   mp->mp_list[i-1], bn++);
@@ -556,7 +557,7 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
 	case ALLOC_DATA:
 		BUG_ON(n > dblks);
 		BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
-		gfs2_trans_add_bh(ip->i_gl, mp->mp_bh[end_of_metadata], 1);
+		gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);
 		dblks = n;
 		ptr = metapointer(end_of_metadata, mp);
 		dblock = bn;
@@ -796,8 +797,8 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
 
 	down_write(&ip->i_rw_mutex);
 
-	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-	gfs2_trans_add_bh(ip->i_gl, bh, 1);
+	gfs2_trans_add_meta(ip->i_gl, dibh);
+	gfs2_trans_add_meta(ip->i_gl, bh);
 
 	bstart = 0;
 	blen = 0;
@@ -981,7 +982,7 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
 	}
 
 	if (!gfs2_is_writeback(ip))
-		gfs2_trans_add_bh(ip->i_gl, bh, 0);
+		gfs2_trans_add_data(ip->i_gl, bh);
 
 	zero_user(page, offset, length);
 	mark_buffer_dirty(bh);
@@ -1046,7 +1047,7 @@ static int trunc_start(struct inode *inode, u64 oldsize, u64 newsize)
 	if (error)
 		goto out;
 
-	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+	gfs2_trans_add_meta(ip->i_gl, dibh);
 
 	if (gfs2_is_stuffed(ip)) {
 		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
@@ -1098,7 +1099,7 @@ static int trunc_dealloc(struct gfs2_inode *ip, u64 size)
 	if (error)
 		return error;
 
-	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
+	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
 	if (error)
 		return error;
 
@@ -1137,11 +1138,12 @@ static int trunc_end(struct gfs2_inode *ip)
 		ip->i_height = 0;
 		ip->i_goal = ip->i_no_addr;
 		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
+		gfs2_ordered_del_inode(ip);
 	}
 	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
 	ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;
 
-	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+	gfs2_trans_add_meta(ip->i_gl, dibh);
 	gfs2_dinode_out(ip, dibh->b_data);
 	brelse(dibh);
 
@@ -1246,7 +1248,7 @@ static int do_grow(struct inode *inode, u64 size)
 
 	i_size_write(inode, size);
 	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
-	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+	gfs2_trans_add_meta(ip->i_gl, dibh);
 	gfs2_dinode_out(ip, dibh->b_data);
 	brelse(dibh);
 
@@ -1286,6 +1288,10 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize)
 
 	inode_dio_wait(inode);
 
+	ret = gfs2_rs_alloc(GFS2_I(inode));
+	if (ret)
+		return ret;
+
 	oldsize = inode->i_size;
 	if (newsize >= oldsize)
 		return do_grow(inode, newsize);
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 9a35670fdc38..c3e82bd23179 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -93,7 +93,7 @@ int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
 	struct buffer_head *bh;
 
 	bh = gfs2_meta_new(ip->i_gl, block);
-	gfs2_trans_add_bh(ip->i_gl, bh, 1);
+	gfs2_trans_add_meta(ip->i_gl, bh);
 	gfs2_metatype_set(bh, GFS2_METATYPE_JD, GFS2_FORMAT_JD);
 	gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));
 	*bhp = bh;
@@ -127,7 +127,7 @@ static int gfs2_dir_write_stuffed(struct gfs2_inode *ip, const char *buf,
 	if (error)
 		return error;
 
-	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+	gfs2_trans_add_meta(ip->i_gl, dibh);
 	memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size);
 	if (ip->i_inode.i_size < offset + size)
 		i_size_write(&ip->i_inode, offset + size);
@@ -209,7 +209,7 @@ static int gfs2_dir_write_data(struct gfs2_inode *ip, const char *buf,
 		if (error)
 			goto fail;
 
-		gfs2_trans_add_bh(ip->i_gl, bh, 1);
+		gfs2_trans_add_meta(ip->i_gl, bh);
 		memcpy(bh->b_data + o, buf, amount);
 		brelse(bh);
 
@@ -231,7 +231,7 @@ out:
 	i_size_write(&ip->i_inode, offset + copied);
 	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
 
-	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+	gfs2_trans_add_meta(ip->i_gl, dibh);
 	gfs2_dinode_out(ip, dibh->b_data);
 	brelse(dibh);
 
@@ -647,7 +647,7 @@ static void dirent_del(struct gfs2_inode *dip, struct buffer_head *bh,
 		return;
 	}
 
-	gfs2_trans_add_bh(dip->i_gl, bh, 1);
+	gfs2_trans_add_meta(dip->i_gl, bh);
 
 	/* If there is no prev entry, this is the first entry in the block.
 	   The de_rec_len is already as big as it needs to be. Just zero
@@ -690,7 +690,7 @@ static struct gfs2_dirent *gfs2_init_dirent(struct inode *inode,
 	offset = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len));
 	totlen = be16_to_cpu(dent->de_rec_len);
 	BUG_ON(offset + name->len > totlen);
-	gfs2_trans_add_bh(ip->i_gl, bh, 1);
+	gfs2_trans_add_meta(ip->i_gl, bh);
 	ndent = (struct gfs2_dirent *)((char *)dent + offset);
 	dent->de_rec_len = cpu_to_be16(offset);
 	gfs2_qstr2dirent(name, totlen - offset, ndent);
@@ -831,7 +831,7 @@ static struct gfs2_leaf *new_leaf(struct inode *inode, struct buffer_head **pbh,
 		return NULL;
 
 	gfs2_trans_add_unrevoke(GFS2_SB(inode), bn, 1);
-	gfs2_trans_add_bh(ip->i_gl, bh, 1);
+	gfs2_trans_add_meta(ip->i_gl, bh);
 	gfs2_metatype_set(bh, GFS2_METATYPE_LF, GFS2_FORMAT_LF);
 	leaf = (struct gfs2_leaf *)bh->b_data;
 	leaf->lf_depth = cpu_to_be16(depth);
@@ -916,7 +916,7 @@ static int dir_make_exhash(struct inode *inode)
 	/* We're done with the new leaf block, now setup the new
 	   hash table. */
 
-	gfs2_trans_add_bh(dip->i_gl, dibh, 1);
+	gfs2_trans_add_meta(dip->i_gl, dibh);
 	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
 
 	lp = (__be64 *)(dibh->b_data + sizeof(struct gfs2_dinode));
@@ -976,7 +976,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
 		return 1; /* can't split */
 	}
 
-	gfs2_trans_add_bh(dip->i_gl, obh, 1);
+	gfs2_trans_add_meta(dip->i_gl, obh);
 
 	nleaf = new_leaf(inode, &nbh, be16_to_cpu(oleaf->lf_depth) + 1);
 	if (!nleaf) {
@@ -1069,7 +1069,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
 
 	error = gfs2_meta_inode_buffer(dip, &dibh);
 	if (!gfs2_assert_withdraw(GFS2_SB(&dip->i_inode), !error)) {
-		gfs2_trans_add_bh(dip->i_gl, dibh, 1);
+		gfs2_trans_add_meta(dip->i_gl, dibh);
 		gfs2_add_inode_blocks(&dip->i_inode, 1);
 		gfs2_dinode_out(dip, dibh->b_data);
 		brelse(dibh);
@@ -1622,7 +1622,7 @@ static int dir_new_leaf(struct inode *inode, const struct qstr *name)
 			return error;
 	} while(1);
 
-	gfs2_trans_add_bh(ip->i_gl, obh, 1);
+	gfs2_trans_add_meta(ip->i_gl, obh);
 
 	leaf = new_leaf(inode, &bh, be16_to_cpu(oleaf->lf_depth));
 	if (!leaf) {
@@ -1636,7 +1636,7 @@ static int dir_new_leaf(struct inode *inode, const struct qstr *name)
 	error = gfs2_meta_inode_buffer(ip, &bh);
 	if (error)
 		return error;
-	gfs2_trans_add_bh(ip->i_gl, bh, 1);
+	gfs2_trans_add_meta(ip->i_gl, bh);
 	gfs2_add_inode_blocks(&ip->i_inode, 1);
 	gfs2_dinode_out(ip, bh->b_data);
 	brelse(bh);
@@ -1795,7 +1795,7 @@ int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
 	if (IS_ERR(dent))
 		return PTR_ERR(dent);
 
-	gfs2_trans_add_bh(dip->i_gl, bh, 1);
+	gfs2_trans_add_meta(dip->i_gl, bh);
 	gfs2_inum_out(nip, dent);
 	dent->de_type = cpu_to_be16(new_type);
 
@@ -1804,7 +1804,7 @@ int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
 		error = gfs2_meta_inode_buffer(dip, &bh);
 		if (error)
 			return error;
-		gfs2_trans_add_bh(dip->i_gl, bh, 1);
+		gfs2_trans_add_meta(dip->i_gl, bh);
 	}
 
 	dip->i_inode.i_mtime = dip->i_inode.i_ctime = CURRENT_TIME;
@@ -1849,7 +1849,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
 	if (!ht)
 		return -ENOMEM;
 
-	error = gfs2_quota_hold(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
+	error = gfs2_quota_hold(dip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
 	if (error)
 		goto out;
 
@@ -1917,7 +1917,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
 	if (error)
 		goto out_end_trans;
 
-	gfs2_trans_add_bh(dip->i_gl, dibh, 1);
+	gfs2_trans_add_meta(dip->i_gl, dibh);
 	/* On the last dealloc, make this a regular file in case we crash.
 	   (We don't want to free these blocks a second time.) */
 	if (last_dealloc)
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 44543df9f400..019f45e45097 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -276,7 +276,7 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
 	error = gfs2_meta_inode_buffer(ip, &bh);
 	if (error)
 		goto out_trans_end;
-	gfs2_trans_add_bh(ip->i_gl, bh, 1);
+	gfs2_trans_add_meta(ip->i_gl, bh);
 	ip->i_diskflags = new_flags;
 	gfs2_dinode_out(ip, bh->b_data);
 	brelse(bh);
@@ -483,7 +483,7 @@ out:
 	gfs2_holder_uninit(&gh);
 	if (ret == 0) {
 		set_page_dirty(page);
-		wait_on_page_writeback(page);
+		wait_for_stable_page(page);
 	}
 	sb_end_pagefault(inode->i_sb);
 	return block_page_mkwrite_return(ret);
@@ -708,7 +708,7 @@ static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
 	if (unlikely(error))
 		return error;
 
-	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+	gfs2_trans_add_meta(ip->i_gl, dibh);
 
 	if (gfs2_is_stuffed(ip)) {
 		error = gfs2_unstuff_dinode(ip, NULL);
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 992c5c0cb504..cf3515546739 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -30,6 +30,7 @@
 #include <linux/rculist_bl.h>
 #include <linux/bit_spinlock.h>
 #include <linux/percpu.h>
+#include <linux/list_sort.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -1376,56 +1377,105 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 	gfs2_glock_put(gl);
 }
 
+static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+	struct gfs2_glock *gla, *glb;
 
-static int gfs2_shrink_glock_memory(struct shrinker *shrink,
-				    struct shrink_control *sc)
+	gla = list_entry(a, struct gfs2_glock, gl_lru);
+	glb = list_entry(b, struct gfs2_glock, gl_lru);
+
+	if (gla->gl_name.ln_number > glb->gl_name.ln_number)
+		return 1;
+	if (gla->gl_name.ln_number < glb->gl_name.ln_number)
+		return -1;
+
+	return 0;
+}
+
+/**
+ * gfs2_dispose_glock_lru - Demote a list of glocks
+ * @list: The list to dispose of
+ *
+ * Disposing of glocks may involve disk accesses, so that here we sort
+ * the glocks by number (i.e. disk location of the inodes) so that if
+ * there are any such accesses, they'll be sent in order (mostly).
+ *
+ * Must be called under the lru_lock, but may drop and retake this
+ * lock. While the lru_lock is dropped, entries may vanish from the
+ * list, but no new entries will appear on the list (since it is
+ * private)
+ */
+
+static void gfs2_dispose_glock_lru(struct list_head *list)
+__releases(&lru_lock)
+__acquires(&lru_lock)
 {
 	struct gfs2_glock *gl;
-	int may_demote;
-	int nr_skipped = 0;
-	int nr = sc->nr_to_scan;
-	gfp_t gfp_mask = sc->gfp_mask;
-	LIST_HEAD(skipped);
 
-	if (nr == 0)
-		goto out;
+	list_sort(NULL, list, glock_cmp);
 
-	if (!(gfp_mask & __GFP_FS))
-		return -1;
+	while(!list_empty(list)) {
+		gl = list_entry(list->next, struct gfs2_glock, gl_lru);
+		list_del_init(&gl->gl_lru);
+		clear_bit(GLF_LRU, &gl->gl_flags);
+		gfs2_glock_hold(gl);
+		spin_unlock(&lru_lock);
+		spin_lock(&gl->gl_spin);
+		if (demote_ok(gl))
+			handle_callback(gl, LM_ST_UNLOCKED, 0);
+		WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
+		smp_mb__after_clear_bit();
+		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+			gfs2_glock_put_nolock(gl);
+		spin_unlock(&gl->gl_spin);
+		spin_lock(&lru_lock);
+	}
+}
+
+/**
+ * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
+ * @nr: The number of entries to scan
+ *
+ * This function selects the entries on the LRU which are able to
+ * be demoted, and then kicks off the process by calling
+ * gfs2_dispose_glock_lru() above.
+ */
+
+static void gfs2_scan_glock_lru(int nr)
+{
+	struct gfs2_glock *gl;
+	LIST_HEAD(skipped);
+	LIST_HEAD(dispose);
 
 	spin_lock(&lru_lock);
 	while(nr && !list_empty(&lru_list)) {
 		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
-		list_del_init(&gl->gl_lru);
-		clear_bit(GLF_LRU, &gl->gl_flags);
-		atomic_dec(&lru_count);
 
 		/* Test for being demotable */
 		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
-			gfs2_glock_hold(gl);
-			spin_unlock(&lru_lock);
-			spin_lock(&gl->gl_spin);
-			may_demote = demote_ok(gl);
-			if (may_demote) {
-				handle_callback(gl, LM_ST_UNLOCKED, 0);
-				nr--;
-			}
-			clear_bit(GLF_LOCK, &gl->gl_flags);
-			smp_mb__after_clear_bit();
-			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-				gfs2_glock_put_nolock(gl);
-			spin_unlock(&gl->gl_spin);
-			spin_lock(&lru_lock);
+			list_move(&gl->gl_lru, &dispose);
+			atomic_dec(&lru_count);
+			nr--;
 			continue;
 		}
-		nr_skipped++;
-		list_add(&gl->gl_lru, &skipped);
-		set_bit(GLF_LRU, &gl->gl_flags);
+
+		list_move(&gl->gl_lru, &skipped);
 	}
 	list_splice(&skipped, &lru_list);
-	atomic_add(nr_skipped, &lru_count);
+	if (!list_empty(&dispose))
+		gfs2_dispose_glock_lru(&dispose);
 	spin_unlock(&lru_lock);
-out:
+}
+
+static int gfs2_shrink_glock_memory(struct shrinker *shrink,
+				    struct shrink_control *sc)
+{
+	if (sc->nr_to_scan) {
+		if (!(sc->gfp_mask & __GFP_FS))
+			return -1;
+		gfs2_scan_glock_lru(sc->nr_to_scan);
+	}
+
 	return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure;
 }
 
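The glock shrinker above is split into two phases: gfs2_scan_glock_lru() only moves demotable glocks onto a private dispose list while holding lru_lock, and gfs2_dispose_glock_lru() sorts that list by disk address (list_sort) before doing the lock-dropping work, so any resulting I/O goes out roughly in disk order. A small userspace sketch of the same "collect under the lock, sort, then process while dropping the lock" shape, using pthreads and qsort rather than the kernel list API (all names invented):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

struct glock { unsigned long number; int demotable; };

static int cmp_number(const void *a, const void *b)
{
	const struct glock *ga = *(struct glock *const *)a;
	const struct glock *gb = *(struct glock *const *)b;
	return (ga->number > gb->number) - (ga->number < gb->number);
}

/* Phase 2: sort the private batch, then work on it, dropping the lock
 * around the (potentially slow) per-entry step. */
static void dispose_batch(struct glock **batch, size_t n)
{
	qsort(batch, n, sizeof(*batch), cmp_number);
	for (size_t i = 0; i < n; i++) {
		pthread_mutex_unlock(&lru_lock);
		printf("demote glock %lu\n", batch[i]->number); /* slow work */
		pthread_mutex_lock(&lru_lock);
	}
}

/* Phase 1: under the lock, just pick candidates into a private batch. */
static void scan_lru(struct glock *lru, size_t n)
{
	struct glock *batch[16];
	size_t picked = 0;

	pthread_mutex_lock(&lru_lock);
	for (size_t i = 0; i < n && picked < 16; i++)
		if (lru[i].demotable)
			batch[picked++] = &lru[i];
	if (picked)
		dispose_batch(batch, picked);
	pthread_mutex_unlock(&lru_lock);
}

int main(void)
{
	struct glock lru[] = { {42, 1}, {7, 0}, {19, 1}, {3, 1} };
	scan_lru(lru, 4);	/* prints 3, 19, 42 in disk order */
	return 0;
}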
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 78d4184ffc7d..444b6503ebc4 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -322,8 +322,8 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
 		break;
 	};
 
-	ip->i_inode.i_uid = be32_to_cpu(str->di_uid);
-	ip->i_inode.i_gid = be32_to_cpu(str->di_gid);
+	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
+	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
 	gfs2_set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
 	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
 	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index c373a24fedd9..156e42ec84ea 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -52,7 +52,6 @@ struct gfs2_log_header_host {
  */
 
 struct gfs2_log_operations {
-	void (*lo_add) (struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
 	void (*lo_before_commit) (struct gfs2_sbd *sdp);
 	void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_ail *ai);
 	void (*lo_before_scan) (struct gfs2_jdesc *jd,
@@ -341,6 +340,7 @@ enum {
 	GIF_QD_LOCKED = 1,
 	GIF_ALLOC_FAILED = 2,
 	GIF_SW_PAGED = 3,
+	GIF_ORDERED = 4,
 };
 
 struct gfs2_inode {
@@ -357,6 +357,7 @@ struct gfs2_inode {
 	struct gfs2_rgrpd *i_rgd;
 	u64 i_goal;	/* goal block for allocations */
 	struct rw_semaphore i_rw_mutex;
+	struct list_head i_ordered;
 	struct list_head i_trunc_list;
 	__be64 *i_hash_cache;
 	u32 i_entries;
@@ -391,7 +392,6 @@ struct gfs2_revoke_replay {
 };
 
 enum {
-	QDF_USER = 0,
 	QDF_CHANGE = 1,
 	QDF_LOCKED = 2,
 	QDF_REFRESH = 3,
@@ -403,7 +403,7 @@ struct gfs2_quota_data {
 
 	atomic_t qd_count;
 
-	u32 qd_id;
+	struct kqid qd_id;
 	unsigned long qd_flags;	/* QDF_... */
 
 	s64 qd_change;
@@ -641,6 +641,7 @@ struct gfs2_sbd {
 	wait_queue_head_t sd_glock_wait;
 	atomic_t sd_glock_disposal;
 	struct completion sd_locking_init;
+	struct completion sd_wdack;
 	struct delayed_work sd_control_work;
 
 	/* Inode Stuff */
@@ -723,6 +724,7 @@ struct gfs2_sbd {
 	struct list_head sd_log_le_revoke;
 	struct list_head sd_log_le_databuf;
 	struct list_head sd_log_le_ordered;
+	spinlock_t sd_ordered_lock;
 
 	atomic_t sd_log_thresh1;
 	atomic_t sd_log_thresh2;
@@ -758,10 +760,7 @@ struct gfs2_sbd {
 	unsigned int sd_replayed_blocks;
 
 	/* For quiescing the filesystem */
-
 	struct gfs2_holder sd_freeze_gh;
-	struct mutex sd_freeze_lock;
-	unsigned int sd_freeze_count;
 
 	char sd_fsname[GFS2_FSNAME_LEN];
 	char sd_table_name[GFS2_FSNAME_LEN];
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 2b6f5698ef18..cc00bd1d1f87 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -368,10 +368,11 @@ static void munge_mode_uid_gid(const struct gfs2_inode *dip,
 			       struct inode *inode)
 {
 	if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
-	    (dip->i_inode.i_mode & S_ISUID) && dip->i_inode.i_uid) {
+	    (dip->i_inode.i_mode & S_ISUID) &&
+	    !uid_eq(dip->i_inode.i_uid, GLOBAL_ROOT_UID)) {
 		if (S_ISDIR(inode->i_mode))
 			inode->i_mode |= S_ISUID;
-		else if (dip->i_inode.i_uid != current_fsuid())
+		else if (!uid_eq(dip->i_inode.i_uid, current_fsuid()))
 			inode->i_mode &= ~07111;
 		inode->i_uid = dip->i_inode.i_uid;
 	} else
@@ -447,7 +448,7 @@ static void init_dinode(struct gfs2_inode *dip, struct gfs2_inode *ip,
 	struct timespec tv = CURRENT_TIME;
 
 	dibh = gfs2_meta_new(ip->i_gl, ip->i_no_addr);
-	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+	gfs2_trans_add_meta(ip->i_gl, dibh);
 	gfs2_metatype_set(dibh, GFS2_METATYPE_DI, GFS2_FORMAT_DI);
 	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
 	di = (struct gfs2_dinode *)dibh->b_data;
@@ -455,8 +456,8 @@ static void init_dinode(struct gfs2_inode *dip, struct gfs2_inode *ip,
 	di->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
 	di->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
 	di->di_mode = cpu_to_be32(ip->i_inode.i_mode);
-	di->di_uid = cpu_to_be32(ip->i_inode.i_uid);
-	di->di_gid = cpu_to_be32(ip->i_inode.i_gid);
+	di->di_uid = cpu_to_be32(i_uid_read(&ip->i_inode));
+	di->di_gid = cpu_to_be32(i_gid_read(&ip->i_inode));
 	di->di_nlink = 0;
 	di->di_size = cpu_to_be64(ip->i_inode.i_size);
 	di->di_blocks = cpu_to_be64(1);
@@ -548,7 +549,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
 	if (error)
 		return error;
 
-	error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
+	error = gfs2_quota_lock(dip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
 	if (error)
 		goto fail;
 
@@ -584,7 +585,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
 	if (error)
 		goto fail_end_trans;
 	set_nlink(&ip->i_inode, S_ISDIR(ip->i_inode.i_mode) ? 2 : 1);
-	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+	gfs2_trans_add_meta(ip->i_gl, dibh);
 	gfs2_dinode_out(ip, dibh->b_data);
 	brelse(dibh);
 	return 0;
@@ -931,7 +932,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
 	if (error)
 		goto out_brelse;
 
-	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+	gfs2_trans_add_meta(ip->i_gl, dibh);
 	inc_nlink(&ip->i_inode);
 	ip->i_inode.i_ctime = CURRENT_TIME;
 	ihold(inode);
@@ -978,8 +979,8 @@ static int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
 		return -EPERM;
 
 	if ((dip->i_inode.i_mode & S_ISVTX) &&
-	    dip->i_inode.i_uid != current_fsuid() &&
-	    ip->i_inode.i_uid != current_fsuid() && !capable(CAP_FOWNER))
+	    !uid_eq(dip->i_inode.i_uid, current_fsuid()) &&
+	    !uid_eq(ip->i_inode.i_uid, current_fsuid()) && !capable(CAP_FOWNER))
 		return -EPERM;
 
 	if (IS_APPEND(&dip->i_inode))
@@ -1412,7 +1413,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
 		if (error)
 			goto out_end_trans;
 		ip->i_inode.i_ctime = CURRENT_TIME;
-		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+		gfs2_trans_add_meta(ip->i_gl, dibh);
 		gfs2_dinode_out(ip, dibh->b_data);
 		brelse(dibh);
 	}
@@ -1580,7 +1581,8 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
 {
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
-	u32 ouid, ogid, nuid, ngid;
+	kuid_t ouid, nuid;
+	kgid_t ogid, ngid;
 	int error;
 
 	ouid = inode->i_uid;
@@ -1588,16 +1590,17 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
 	nuid = attr->ia_uid;
 	ngid = attr->ia_gid;
 
-	if (!(attr->ia_valid & ATTR_UID) || ouid == nuid)
-		ouid = nuid = NO_QUOTA_CHANGE;
-	if (!(attr->ia_valid & ATTR_GID) || ogid == ngid)
-		ogid = ngid = NO_QUOTA_CHANGE;
+	if (!(attr->ia_valid & ATTR_UID) || uid_eq(ouid, nuid))
+		ouid = nuid = NO_UID_QUOTA_CHANGE;
+	if (!(attr->ia_valid & ATTR_GID) || gid_eq(ogid, ngid))
+		ogid = ngid = NO_GID_QUOTA_CHANGE;
 
 	error = gfs2_quota_lock(ip, nuid, ngid);
 	if (error)
 		return error;
 
-	if (ouid != NO_QUOTA_CHANGE || ogid != NO_QUOTA_CHANGE) {
+	if (!uid_eq(ouid, NO_UID_QUOTA_CHANGE) ||
+	    !gid_eq(ogid, NO_GID_QUOTA_CHANGE)) {
 		error = gfs2_quota_check(ip, nuid, ngid);
 		if (error)
 			goto out_gunlock_q;
@@ -1611,7 +1614,8 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
 	if (error)
 		goto out_end_trans;
 
-	if (ouid != NO_QUOTA_CHANGE || ogid != NO_QUOTA_CHANGE) {
+	if (!uid_eq(ouid, NO_UID_QUOTA_CHANGE) ||
+	    !gid_eq(ogid, NO_GID_QUOTA_CHANGE)) {
 		u64 blocks = gfs2_get_inode_blocks(&ip->i_inode);
 		gfs2_quota_change(ip, -blocks, ouid, ogid);
 		gfs2_quota_change(ip, blocks, nuid, ngid);
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index b906ed17a839..9802de0f85e6 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -281,6 +281,7 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_sbd;
 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+	int lvb_needs_unlock = 0;
 	int error;
 
 	if (gl->gl_lksb.sb_lkid == 0) {
@@ -294,8 +295,12 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
 	gfs2_update_request_times(gl);
 
 	/* don't want to skip dlm_unlock writing the lvb when lock is ex */
+
+	if (gl->gl_lksb.sb_lvbptr && (gl->gl_state == LM_ST_EXCLUSIVE))
+		lvb_needs_unlock = 1;
+
 	if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
-	    gl->gl_lksb.sb_lvbptr && (gl->gl_state != LM_ST_EXCLUSIVE)) {
+	    !lvb_needs_unlock) {
 		gfs2_glock_free(gl);
 		return;
 	}
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index f4beeb9c81c1..9a2ca8be7647 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -482,70 +482,66 @@ static void log_flush_wait(struct gfs2_sbd *sdp)
 	}
 }
 
-static int bd_cmp(void *priv, struct list_head *a, struct list_head *b)
+static int ip_cmp(void *priv, struct list_head *a, struct list_head *b)
 {
-	struct gfs2_bufdata *bda, *bdb;
+	struct gfs2_inode *ipa, *ipb;
 
-	bda = list_entry(a, struct gfs2_bufdata, bd_list);
-	bdb = list_entry(b, struct gfs2_bufdata, bd_list);
+	ipa = list_entry(a, struct gfs2_inode, i_ordered);
+	ipb = list_entry(b, struct gfs2_inode, i_ordered);
 
-	if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
+	if (ipa->i_no_addr < ipb->i_no_addr)
 		return -1;
-	if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
+	if (ipa->i_no_addr > ipb->i_no_addr)
 		return 1;
 	return 0;
 }
 
 static void gfs2_ordered_write(struct gfs2_sbd *sdp)
 {
-	struct gfs2_bufdata *bd;
-	struct buffer_head *bh;
+	struct gfs2_inode *ip;
 	LIST_HEAD(written);
 
-	gfs2_log_lock(sdp);
-	list_sort(NULL, &sdp->sd_log_le_ordered, &bd_cmp);
+	spin_lock(&sdp->sd_ordered_lock);
+	list_sort(NULL, &sdp->sd_log_le_ordered, &ip_cmp);
 	while (!list_empty(&sdp->sd_log_le_ordered)) {
-		bd = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_bufdata, bd_list);
-		list_move(&bd->bd_list, &written);
-		bh = bd->bd_bh;
-		if (!buffer_dirty(bh))
+		ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
+		list_move(&ip->i_ordered, &written);
+		if (ip->i_inode.i_mapping->nrpages == 0)
 			continue;
-		get_bh(bh);
-		gfs2_log_unlock(sdp);
-		lock_buffer(bh);
-		if (buffer_mapped(bh) && test_clear_buffer_dirty(bh)) {
-			bh->b_end_io = end_buffer_write_sync;
-			submit_bh(WRITE_SYNC, bh);
-		} else {
-			unlock_buffer(bh);
-			brelse(bh);
-		}
-		gfs2_log_lock(sdp);
+		spin_unlock(&sdp->sd_ordered_lock);
+		filemap_fdatawrite(ip->i_inode.i_mapping);
+		spin_lock(&sdp->sd_ordered_lock);
 	}
 	list_splice(&written, &sdp->sd_log_le_ordered);
-	gfs2_log_unlock(sdp);
+	spin_unlock(&sdp->sd_ordered_lock);
 }
 
 static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
 {
-	struct gfs2_bufdata *bd;
-	struct buffer_head *bh;
+	struct gfs2_inode *ip;
 
-	gfs2_log_lock(sdp);
+	spin_lock(&sdp->sd_ordered_lock);
 	while (!list_empty(&sdp->sd_log_le_ordered)) {
-		bd = list_entry(sdp->sd_log_le_ordered.prev, struct gfs2_bufdata, bd_list);
-		bh = bd->bd_bh;
-		if (buffer_locked(bh)) {
-			get_bh(bh);
-			gfs2_log_unlock(sdp);
-			wait_on_buffer(bh);
-			brelse(bh);
-			gfs2_log_lock(sdp);
+		ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
+		list_del(&ip->i_ordered);
+		WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags));
+		if (ip->i_inode.i_mapping->nrpages == 0)
 			continue;
-		}
-		list_del_init(&bd->bd_list);
+		spin_unlock(&sdp->sd_ordered_lock);
+		filemap_fdatawait(ip->i_inode.i_mapping);
+		spin_lock(&sdp->sd_ordered_lock);
 	}
-	gfs2_log_unlock(sdp);
+	spin_unlock(&sdp->sd_ordered_lock);
+}
+
+void gfs2_ordered_del_inode(struct gfs2_inode *ip)
+{
+	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+
+	spin_lock(&sdp->sd_ordered_lock);
+	if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags))
+		list_del(&ip->i_ordered);
+	spin_unlock(&sdp->sd_ordered_lock);
 }
 
 /**
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
index 3fd5215ea25f..3566f35915e0 100644
--- a/fs/gfs2/log.h
+++ b/fs/gfs2/log.h
@@ -48,6 +48,18 @@ static inline void gfs2_log_pointers_init(struct gfs2_sbd *sdp,
 	sdp->sd_log_head = sdp->sd_log_tail = value;
 }
 
+static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip)
+{
+	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+
+	if (!test_bit(GIF_ORDERED, &ip->i_flags)) {
+		spin_lock(&sdp->sd_ordered_lock);
+		if (!test_and_set_bit(GIF_ORDERED, &ip->i_flags))
+			list_add(&ip->i_ordered, &sdp->sd_log_le_ordered);
+		spin_unlock(&sdp->sd_ordered_lock);
+	}
+}
+extern void gfs2_ordered_del_inode(struct gfs2_inode *ip);
 extern unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
 				    unsigned int ssize);
 
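gfs2_ordered_add_inode() checks GIF_ORDERED outside sd_ordered_lock as a cheap fast path and re-checks with test_and_set_bit() under the lock, so an inode is only ever added to the ordered list once. A userspace sketch of that double-checked "flag plus lock" pattern using C11 atomics and a mutex (the structure and names are invented for illustration):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t ordered_lock = PTHREAD_MUTEX_INITIALIZER;
static int ordered_count;			/* stands in for the ordered list */

struct inode_like {
	atomic_bool on_ordered_list;		/* plays the role of GIF_ORDERED */
	int id;
};

static void ordered_add(struct inode_like *ip)
{
	/* Fast path: if the flag is already set, skip the lock entirely. */
	if (atomic_load(&ip->on_ordered_list))
		return;

	pthread_mutex_lock(&ordered_lock);
	/* Re-check under the lock; only the first caller does the list_add. */
	if (!atomic_exchange(&ip->on_ordered_list, true))
		ordered_count++;
	pthread_mutex_unlock(&ordered_lock);
}

int main(void)
{
	struct inode_like ip = { false, 1 };

	ordered_add(&ip);
	ordered_add(&ip);			/* no-op: flag already set */
	printf("inode %d queued %d time(s)\n", ip.id, ordered_count);
	return 0;
}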
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 9ceccb1595a3..a5055977a214 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -37,7 +37,7 @@
  *
  * The log lock must be held when calling this function
  */
-static void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
+void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
 {
 	struct gfs2_bufdata *bd;
 
@@ -388,32 +388,6 @@ static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
 	return page;
 }
 
-static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
-{
-	struct gfs2_meta_header *mh;
-	struct gfs2_trans *tr;
-
-	tr = current->journal_info;
-	tr->tr_touched = 1;
-	if (!list_empty(&bd->bd_list))
-		return;
-	set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
-	set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
-	mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
-	if (unlikely(mh->mh_magic != cpu_to_be32(GFS2_MAGIC))) {
-		printk(KERN_ERR
-		       "Attempting to add uninitialised block to journal (inplace block=%lld)\n",
-		       (unsigned long long)bd->bd_bh->b_blocknr);
-		BUG();
-	}
-	gfs2_pin(sdp, bd->bd_bh);
-	mh->__pad0 = cpu_to_be64(0);
-	mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
-	sdp->sd_log_num_buf++;
-	list_add(&bd->bd_list, &sdp->sd_log_le_buf);
-	tr->tr_num_buf_new++;
-}
-
 static void gfs2_check_magic(struct buffer_head *bh)
 {
 	void *kaddr;
@@ -600,20 +574,6 @@ static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
 		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
 }
 
-static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
-{
-	struct gfs2_glock *gl = bd->bd_gl;
-	struct gfs2_trans *tr;
-
-	tr = current->journal_info;
-	tr->tr_touched = 1;
-	tr->tr_num_revoke++;
-	sdp->sd_log_num_revoke++;
-	atomic_inc(&gl->gl_revokes);
-	set_bit(GLF_LFLUSH, &gl->gl_flags);
-	list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
-}
-
 static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
 {
 	struct gfs2_meta_header *mh;
@@ -749,44 +709,6 @@ static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
 }
 
 /**
- * databuf_lo_add - Add a databuf to the transaction.
- *
- * This is used in two distinct cases:
- * i) In ordered write mode
- *    We put the data buffer on a list so that we can ensure that its
- *    synced to disk at the right time
- * ii) In journaled data mode
- *    We need to journal the data block in the same way as metadata in
- *    the functions above. The difference is that here we have a tag
- *    which is two __be64's being the block number (as per meta data)
- *    and a flag which says whether the data block needs escaping or
- *    not. This means we need a new log entry for each 251 or so data
- *    blocks, which isn't an enormous overhead but twice as much as
- *    for normal metadata blocks.
- */
-static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
-{
-	struct gfs2_trans *tr = current->journal_info;
-	struct address_space *mapping = bd->bd_bh->b_page->mapping;
-	struct gfs2_inode *ip = GFS2_I(mapping->host);
-
-	if (tr)
-		tr->tr_touched = 1;
-	if (!list_empty(&bd->bd_list))
-		return;
-	set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
-	set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
-	if (gfs2_is_jdata(ip)) {
-		gfs2_pin(sdp, bd->bd_bh);
-		tr->tr_num_databuf_new++;
-		sdp->sd_log_num_databuf++;
-		list_add_tail(&bd->bd_list, &sdp->sd_log_le_databuf);
-	} else {
-		list_add_tail(&bd->bd_list, &sdp->sd_log_le_ordered);
-	}
-}
-
-/**
  * databuf_lo_before_commit - Scan the data buffers, writing as we go
  *
  */
@@ -885,7 +807,6 @@ static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
 
 
 const struct gfs2_log_operations gfs2_buf_lops = {
-	.lo_add = buf_lo_add,
 	.lo_before_commit = buf_lo_before_commit,
 	.lo_after_commit = buf_lo_after_commit,
 	.lo_before_scan = buf_lo_before_scan,
@@ -895,7 +816,6 @@ const struct gfs2_log_operations gfs2_buf_lops = {
 };
 
 const struct gfs2_log_operations gfs2_revoke_lops = {
-	.lo_add = revoke_lo_add,
 	.lo_before_commit = revoke_lo_before_commit,
 	.lo_after_commit = revoke_lo_after_commit,
 	.lo_before_scan = revoke_lo_before_scan,
@@ -909,7 +829,6 @@ const struct gfs2_log_operations gfs2_rg_lops = {
 };
 
 const struct gfs2_log_operations gfs2_databuf_lops = {
-	.lo_add = databuf_lo_add,
 	.lo_before_commit = databuf_lo_before_commit,
 	.lo_after_commit = databuf_lo_after_commit,
 	.lo_scan_elements = databuf_lo_scan_elements,
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h
index 954a330585f4..ba77b7da8325 100644
--- a/fs/gfs2/lops.h
+++ b/fs/gfs2/lops.h
@@ -29,6 +29,7 @@ extern const struct gfs2_log_operations gfs2_databuf_lops;
 extern const struct gfs2_log_operations *gfs2_log_ops[];
 extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page);
 extern void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int rw);
+extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
 
 static inline unsigned int buf_limit(struct gfs2_sbd *sdp)
 {
@@ -46,19 +47,6 @@ static inline unsigned int databuf_limit(struct gfs2_sbd *sdp)
 	return limit;
 }
 
-static inline void lops_init_le(struct gfs2_bufdata *bd,
-				const struct gfs2_log_operations *lops)
-{
-	INIT_LIST_HEAD(&bd->bd_list);
-	bd->bd_ops = lops;
-}
-
-static inline void lops_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
-{
-	if (bd->bd_ops->lo_add)
-		bd->bd_ops->lo_add(sdp, bd);
-}
-
 static inline void lops_before_commit(struct gfs2_sbd *sdp)
 {
 	int x;
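With buf_lo_add/revoke_lo_add/databuf_lo_add gone, the gfs2_log_operations table no longer carries a lo_add hook, so lops_init_le() and lops_add() above are removed as well: callers now state their intent directly via gfs2_trans_add_meta()/gfs2_trans_add_data() instead of dispatching through a per-buffer function pointer. A tiny userspace sketch of the general idea (invented names; the remaining hooks stay table-driven, only the hot "add" path is made a direct call):

#include <stdio.h>

struct elem { int id; };

/* Old shape: each element type carried an "add" callback and generic
 * code dispatched through it. */
struct log_ops {
	void (*lo_add)(struct elem *e);		/* optional hook, now dropped */
	void (*lo_before_commit)(void);
};

static void meta_before_commit(void) { puts("write metadata log"); }

static const struct log_ops meta_ops = {
	.lo_add = NULL,
	.lo_before_commit = meta_before_commit,
};

/* New shape: the caller says what it means, no indirection or NULL check. */
static void trans_add_meta(struct elem *e)
{
	printf("pin element %d for the metadata log\n", e->id);
}

int main(void)
{
	struct elem e = { 7 };

	/* old: if (ops->lo_add) ops->lo_add(&e); */
	trans_add_meta(&e);			/* new: direct, type-checked call */
	meta_ops.lo_before_commit();
	return 0;
}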
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 22255d96b27e..b059bbb5059e 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -271,41 +271,6 @@ int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
 	return 0;
 }
 
-/**
- * gfs2_attach_bufdata - attach a struct gfs2_bufdata structure to a buffer
- * @gl: the glock the buffer belongs to
- * @bh: The buffer to be attached to
- * @meta: Flag to indicate whether its metadata or not
- */
-
-void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
-			 int meta)
-{
-	struct gfs2_bufdata *bd;
-
-	if (meta)
-		lock_page(bh->b_page);
-
-	if (bh->b_private) {
-		if (meta)
-			unlock_page(bh->b_page);
-		return;
-	}
-
-	bd = kmem_cache_zalloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL);
-	bd->bd_bh = bh;
-	bd->bd_gl = gl;
-
-	if (meta)
-		lops_init_le(bd, &gfs2_buf_lops);
-	else
-		lops_init_le(bd, &gfs2_databuf_lops);
-	bh->b_private = bd;
-
-	if (meta)
-		unlock_page(bh->b_page);
-}
-
 void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int meta)
 {
 	struct address_space *mapping = bh->b_page->mapping;
diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h
index c30973b07a7c..0d4c843b6f8e 100644
--- a/fs/gfs2/meta_io.h
+++ b/fs/gfs2/meta_io.h
@@ -56,9 +56,6 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno,
 int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh);
 struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create);
 
-void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
-			 int meta);
-
 void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr,
 			      int meta);
 
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 0e3554edb8f2..1b612be4b873 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -81,6 +81,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 	init_waitqueue_head(&sdp->sd_glock_wait);
 	atomic_set(&sdp->sd_glock_disposal, 0);
 	init_completion(&sdp->sd_locking_init);
+	init_completion(&sdp->sd_wdack);
 	spin_lock_init(&sdp->sd_statfs_spin);
 
 	spin_lock_init(&sdp->sd_rindex_spin);
@@ -102,6 +103,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 	INIT_LIST_HEAD(&sdp->sd_log_le_revoke);
 	INIT_LIST_HEAD(&sdp->sd_log_le_databuf);
 	INIT_LIST_HEAD(&sdp->sd_log_le_ordered);
+	spin_lock_init(&sdp->sd_ordered_lock);
 
 	init_waitqueue_head(&sdp->sd_log_waitq);
 	init_waitqueue_head(&sdp->sd_logd_waitq);
@@ -115,8 +117,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 
 	INIT_LIST_HEAD(&sdp->sd_revoke_list);
 
-	mutex_init(&sdp->sd_freeze_lock);
-
 	return sdp;
 }
 
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index ae55e248c3b7..c7c840e916f8 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -65,13 +65,10 @@
 #include "inode.h"
 #include "util.h"
 
-#define QUOTA_USER 1
-#define QUOTA_GROUP 0
-
 struct gfs2_quota_change_host {
 	u64 qc_change;
 	u32 qc_flags; /* GFS2_QCF_... */
-	u32 qc_id;
+	struct kqid qc_id;
 };
 
 static LIST_HEAD(qd_lru_list);
@@ -120,17 +117,24 @@ out:
 	return (atomic_read(&qd_lru_count) * sysctl_vfs_cache_pressure) / 100;
 }
 
+static u64 qd2index(struct gfs2_quota_data *qd)
+{
+	struct kqid qid = qd->qd_id;
+	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
+		(qid.type == USRQUOTA) ? 0 : 1;
+}
+
 static u64 qd2offset(struct gfs2_quota_data *qd)
 {
 	u64 offset;
 
-	offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
+	offset = qd2index(qd);
 	offset *= sizeof(struct gfs2_quota);
 
 	return offset;
 }
 
-static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
+static int qd_alloc(struct gfs2_sbd *sdp, struct kqid qid,
 		    struct gfs2_quota_data **qdp)
 {
 	struct gfs2_quota_data *qd;
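qd2index() maps a quota id to a slot in the quota file — user and group records are interleaved, with user ids on even slots and group ids on odd ones — and qd2offset() turns that slot into a byte offset by multiplying with sizeof(struct gfs2_quota). A small standalone calculation of that layout (the record size below is a stand-in, and the names are invented; the real code uses sizeof(struct gfs2_quota)):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum qtype { QTYPE_USER, QTYPE_GROUP };

/* User and group quota records are interleaved in the quota file:
 * slot = 2 * id, plus 1 for group records. */
static uint64_t qd_index(uint32_t id, enum qtype type)
{
	return 2 * (uint64_t)id + (type == QTYPE_GROUP ? 1 : 0);
}

static uint64_t qd_offset(uint32_t id, enum qtype type, size_t record_size)
{
	return qd_index(id, type) * record_size;
}

int main(void)
{
	const size_t record_size = 64;	/* stand-in for sizeof(struct gfs2_quota) */

	printf("uid 0   -> slot %llu, offset %llu\n",
	       (unsigned long long)qd_index(0, QTYPE_USER),
	       (unsigned long long)qd_offset(0, QTYPE_USER, record_size));
	printf("gid 0   -> slot %llu, offset %llu\n",
	       (unsigned long long)qd_index(0, QTYPE_GROUP),
	       (unsigned long long)qd_offset(0, QTYPE_GROUP, record_size));
	printf("uid 500 -> slot %llu, offset %llu\n",
	       (unsigned long long)qd_index(500, QTYPE_USER),
	       (unsigned long long)qd_offset(500, QTYPE_USER, record_size));
	return 0;
}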
@@ -141,13 +145,11 @@ static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
 		return -ENOMEM;
 
 	atomic_set(&qd->qd_count, 1);
-	qd->qd_id = id;
-	if (user)
-		set_bit(QDF_USER, &qd->qd_flags);
+	qd->qd_id = qid;
 	qd->qd_slot = -1;
 	INIT_LIST_HEAD(&qd->qd_reclaim);
 
-	error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
+	error = gfs2_glock_get(sdp, qd2index(qd),
 			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
 	if (error)
 		goto fail;
@@ -161,7 +163,7 @@ fail:
 	return error;
 }
 
-static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
+static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
 		  struct gfs2_quota_data **qdp)
 {
 	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
@@ -173,8 +175,7 @@ static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
 	found = 0;
 	spin_lock(&qd_lru_lock);
 	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
-		if (qd->qd_id == id &&
-		    !test_bit(QDF_USER, &qd->qd_flags) == !user) {
+		if (qid_eq(qd->qd_id, qid)) {
 			if (!atomic_read(&qd->qd_count) &&
 			    !list_empty(&qd->qd_reclaim)) {
 				/* Remove it from reclaim list */
@@ -208,7 +209,7 @@ static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
 			return 0;
 		}
 
-		error = qd_alloc(sdp, user, id, &new_qd);
+		error = qd_alloc(sdp, qid, &new_qd);
 		if (error)
 			return error;
 	}
@@ -458,12 +459,12 @@ static void qd_unlock(struct gfs2_quota_data *qd)
 	qd_put(qd);
 }
 
-static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id,
+static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
 		    struct gfs2_quota_data **qdp)
 {
 	int error;
 
-	error = qd_get(sdp, user, id, qdp);
+	error = qd_get(sdp, qid, qdp);
 	if (error)
 		return error;
 
@@ -491,7 +492,7 @@ static void qdsb_put(struct gfs2_quota_data *qd)
491 qd_put(qd); 492 qd_put(qd);
492} 493}
493 494
494int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid) 495int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
495{ 496{
496 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 497 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
497 struct gfs2_quota_data **qd; 498 struct gfs2_quota_data **qd;
@@ -512,28 +513,30 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
512 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) 513 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
513 return 0; 514 return 0;
514 515
515 error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd); 516 error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
516 if (error) 517 if (error)
517 goto out; 518 goto out;
518 ip->i_res->rs_qa_qd_num++; 519 ip->i_res->rs_qa_qd_num++;
519 qd++; 520 qd++;
520 521
521 error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd); 522 error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
522 if (error) 523 if (error)
523 goto out; 524 goto out;
524 ip->i_res->rs_qa_qd_num++; 525 ip->i_res->rs_qa_qd_num++;
525 qd++; 526 qd++;
526 527
527 if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) { 528 if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
528 error = qdsb_get(sdp, QUOTA_USER, uid, qd); 529 !uid_eq(uid, ip->i_inode.i_uid)) {
530 error = qdsb_get(sdp, make_kqid_uid(uid), qd);
529 if (error) 531 if (error)
530 goto out; 532 goto out;
531 ip->i_res->rs_qa_qd_num++; 533 ip->i_res->rs_qa_qd_num++;
532 qd++; 534 qd++;
533 } 535 }
534 536
535 if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) { 537 if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
536 error = qdsb_get(sdp, QUOTA_GROUP, gid, qd); 538 !gid_eq(gid, ip->i_inode.i_gid)) {
539 error = qdsb_get(sdp, make_kqid_gid(gid), qd);
537 if (error) 540 if (error)
538 goto out; 541 goto out;
539 ip->i_res->rs_qa_qd_num++; 542 ip->i_res->rs_qa_qd_num++;
@@ -567,18 +570,10 @@ static int sort_qd(const void *a, const void *b)
567 const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a; 570 const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
568 const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b; 571 const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;
569 572
570 if (!test_bit(QDF_USER, &qd_a->qd_flags) != 573 if (qid_lt(qd_a->qd_id, qd_b->qd_id))
571 !test_bit(QDF_USER, &qd_b->qd_flags)) {
572 if (test_bit(QDF_USER, &qd_a->qd_flags))
573 return -1;
574 else
575 return 1;
576 }
577 if (qd_a->qd_id < qd_b->qd_id)
578 return -1; 574 return -1;
579 if (qd_a->qd_id > qd_b->qd_id) 575 if (qid_lt(qd_b->qd_id, qd_a->qd_id))
580 return 1; 576 return 1;
581
582 return 0; 577 return 0;
583} 578}
584 579
@@ -590,14 +585,14 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
590 s64 x; 585 s64 x;
591 586
592 mutex_lock(&sdp->sd_quota_mutex); 587 mutex_lock(&sdp->sd_quota_mutex);
593 gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1); 588 gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);
594 589
595 if (!test_bit(QDF_CHANGE, &qd->qd_flags)) { 590 if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
596 qc->qc_change = 0; 591 qc->qc_change = 0;
597 qc->qc_flags = 0; 592 qc->qc_flags = 0;
598 if (test_bit(QDF_USER, &qd->qd_flags)) 593 if (qd->qd_id.type == USRQUOTA)
599 qc->qc_flags = cpu_to_be32(GFS2_QCF_USER); 594 qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
600 qc->qc_id = cpu_to_be32(qd->qd_id); 595 qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
601 } 596 }
602 597
603 x = be64_to_cpu(qc->qc_change) + change; 598 x = be64_to_cpu(qc->qc_change) + change;
@@ -726,7 +721,7 @@ get_a_page:
726 goto unlock_out; 721 goto unlock_out;
727 } 722 }
728 723
729 gfs2_trans_add_bh(ip->i_gl, bh, 0); 724 gfs2_trans_add_meta(ip->i_gl, bh);
730 725
731 kaddr = kmap_atomic(page); 726 kaddr = kmap_atomic(page);
732 if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE) 727 if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
@@ -925,7 +920,7 @@ fail:
925 return error; 920 return error;
926} 921}
927 922
928int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid) 923int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
929{ 924{
930 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 925 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
931 struct gfs2_quota_data *qd; 926 struct gfs2_quota_data *qd;
@@ -1040,13 +1035,13 @@ static int print_message(struct gfs2_quota_data *qd, char *type)
1040 1035
1041 printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n", 1036 printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
1042 sdp->sd_fsname, type, 1037 sdp->sd_fsname, type,
1043 (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group", 1038 (qd->qd_id.type == USRQUOTA) ? "user" : "group",
1044 qd->qd_id); 1039 from_kqid(&init_user_ns, qd->qd_id));
1045 1040
1046 return 0; 1041 return 0;
1047} 1042}
1048 1043
1049int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid) 1044int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
1050{ 1045{
1051 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 1046 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1052 struct gfs2_quota_data *qd; 1047 struct gfs2_quota_data *qd;
@@ -1063,8 +1058,8 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
1063 for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) { 1058 for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
1064 qd = ip->i_res->rs_qa_qd[x]; 1059 qd = ip->i_res->rs_qa_qd[x];
1065 1060
1066 if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) || 1061 if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
1067 (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags)))) 1062 qid_eq(qd->qd_id, make_kqid_gid(gid))))
1068 continue; 1063 continue;
1069 1064
1070 value = (s64)be64_to_cpu(qd->qd_qb.qb_value); 1065 value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
@@ -1074,10 +1069,7 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
1074 1069
1075 if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) { 1070 if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
1076 print_message(qd, "exceeded"); 1071 print_message(qd, "exceeded");
1077 quota_send_warning(make_kqid(&init_user_ns, 1072 quota_send_warning(qd->qd_id,
1078 test_bit(QDF_USER, &qd->qd_flags) ?
1079 USRQUOTA : GRPQUOTA,
1080 qd->qd_id),
1081 sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN); 1073 sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);
1082 1074
1083 error = -EDQUOT; 1075 error = -EDQUOT;
@@ -1087,10 +1079,7 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
1087 time_after_eq(jiffies, qd->qd_last_warn + 1079 time_after_eq(jiffies, qd->qd_last_warn +
1088 gfs2_tune_get(sdp, 1080 gfs2_tune_get(sdp,
1089 gt_quota_warn_period) * HZ)) { 1081 gt_quota_warn_period) * HZ)) {
1090 quota_send_warning(make_kqid(&init_user_ns, 1082 quota_send_warning(qd->qd_id,
1091 test_bit(QDF_USER, &qd->qd_flags) ?
1092 USRQUOTA : GRPQUOTA,
1093 qd->qd_id),
1094 sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN); 1083 sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
1095 error = print_message(qd, "warning"); 1084 error = print_message(qd, "warning");
1096 qd->qd_last_warn = jiffies; 1085 qd->qd_last_warn = jiffies;
@@ -1101,7 +1090,7 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
1101} 1090}
1102 1091
1103void gfs2_quota_change(struct gfs2_inode *ip, s64 change, 1092void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
1104 u32 uid, u32 gid) 1093 kuid_t uid, kgid_t gid)
1105{ 1094{
1106 struct gfs2_quota_data *qd; 1095 struct gfs2_quota_data *qd;
1107 unsigned int x; 1096 unsigned int x;
@@ -1114,8 +1103,8 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
1114 for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) { 1103 for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
1115 qd = ip->i_res->rs_qa_qd[x]; 1104 qd = ip->i_res->rs_qa_qd[x];
1116 1105
1117 if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) || 1106 if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
1118 (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) { 1107 qid_eq(qd->qd_id, make_kqid_gid(gid))) {
1119 do_qc(qd, change); 1108 do_qc(qd, change);
1120 } 1109 }
1121 } 1110 }
@@ -1170,13 +1159,13 @@ static int gfs2_quota_sync_timeo(struct super_block *sb, int type)
1170 return gfs2_quota_sync(sb, type); 1159 return gfs2_quota_sync(sb, type);
1171} 1160}
1172 1161
1173int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id) 1162int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
1174{ 1163{
1175 struct gfs2_quota_data *qd; 1164 struct gfs2_quota_data *qd;
1176 struct gfs2_holder q_gh; 1165 struct gfs2_holder q_gh;
1177 int error; 1166 int error;
1178 1167
1179 error = qd_get(sdp, user, id, &qd); 1168 error = qd_get(sdp, qid, &qd);
1180 if (error) 1169 if (error)
1181 return error; 1170 return error;
1182 1171
@@ -1194,7 +1183,9 @@ static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *
1194 1183
1195 qc->qc_change = be64_to_cpu(str->qc_change); 1184 qc->qc_change = be64_to_cpu(str->qc_change);
1196 qc->qc_flags = be32_to_cpu(str->qc_flags); 1185 qc->qc_flags = be32_to_cpu(str->qc_flags);
1197 qc->qc_id = be32_to_cpu(str->qc_id); 1186 qc->qc_id = make_kqid(&init_user_ns,
1187	 (qc->qc_flags & GFS2_QCF_USER) ? USRQUOTA : GRPQUOTA,
1188 be32_to_cpu(str->qc_id));
1198} 1189}
1199 1190
1200int gfs2_quota_init(struct gfs2_sbd *sdp) 1191int gfs2_quota_init(struct gfs2_sbd *sdp)
@@ -1257,8 +1248,7 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
1257 if (!qc.qc_change) 1248 if (!qc.qc_change)
1258 continue; 1249 continue;
1259 1250
1260 error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER), 1251 error = qd_alloc(sdp, qc.qc_id, &qd);
1261 qc.qc_id, &qd);
1262 if (error) { 1252 if (error) {
1263 brelse(bh); 1253 brelse(bh);
1264 goto fail; 1254 goto fail;
@@ -1485,21 +1475,17 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
1485 struct gfs2_quota_data *qd; 1475 struct gfs2_quota_data *qd;
1486 struct gfs2_holder q_gh; 1476 struct gfs2_holder q_gh;
1487 int error; 1477 int error;
1488 int type;
1489 1478
1490 memset(fdq, 0, sizeof(struct fs_disk_quota)); 1479 memset(fdq, 0, sizeof(struct fs_disk_quota));
1491 1480
1492 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) 1481 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1493 return -ESRCH; /* Crazy XFS error code */ 1482 return -ESRCH; /* Crazy XFS error code */
1494 1483
1495 if (qid.type == USRQUOTA) 1484 if ((qid.type != USRQUOTA) &&
1496 type = QUOTA_USER; 1485 (qid.type != GRPQUOTA))
1497 else if (qid.type == GRPQUOTA)
1498 type = QUOTA_GROUP;
1499 else
1500 return -EINVAL; 1486 return -EINVAL;
1501 1487
1502 error = qd_get(sdp, type, from_kqid(&init_user_ns, qid), &qd); 1488 error = qd_get(sdp, qid, &qd);
1503 if (error) 1489 if (error)
1504 return error; 1490 return error;
1505 error = do_glock(qd, FORCE, &q_gh); 1491 error = do_glock(qd, FORCE, &q_gh);
@@ -1508,8 +1494,8 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
1508 1494
1509 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr; 1495 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1510 fdq->d_version = FS_DQUOT_VERSION; 1496 fdq->d_version = FS_DQUOT_VERSION;
1511 fdq->d_flags = (type == QUOTA_USER) ? FS_USER_QUOTA : FS_GROUP_QUOTA; 1497 fdq->d_flags = (qid.type == USRQUOTA) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
1512 fdq->d_id = from_kqid(&init_user_ns, qid); 1498 fdq->d_id = from_kqid_munged(current_user_ns(), qid);
1513 fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift; 1499 fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
1514 fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift; 1500 fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
1515 fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift; 1501 fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;
@@ -1535,32 +1521,18 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
1535 int alloc_required; 1521 int alloc_required;
1536 loff_t offset; 1522 loff_t offset;
1537 int error; 1523 int error;
1538 int type;
1539 1524
1540 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) 1525 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1541 return -ESRCH; /* Crazy XFS error code */ 1526 return -ESRCH; /* Crazy XFS error code */
1542 1527
1543 switch(qid.type) { 1528 if ((qid.type != USRQUOTA) &&
1544 case USRQUOTA: 1529 (qid.type != GRPQUOTA))
1545 type = QUOTA_USER;
1546 if (fdq->d_flags != FS_USER_QUOTA)
1547 return -EINVAL;
1548 break;
1549 case GRPQUOTA:
1550 type = QUOTA_GROUP;
1551 if (fdq->d_flags != FS_GROUP_QUOTA)
1552 return -EINVAL;
1553 break;
1554 default:
1555 return -EINVAL; 1530 return -EINVAL;
1556 }
1557 1531
1558 if (fdq->d_fieldmask & ~GFS2_FIELDMASK) 1532 if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
1559 return -EINVAL; 1533 return -EINVAL;
1560 if (fdq->d_id != from_kqid(&init_user_ns, qid))
1561 return -EINVAL;
1562 1534
1563 error = qd_get(sdp, type, from_kqid(&init_user_ns, qid), &qd); 1535 error = qd_get(sdp, qid, &qd);
1564 if (error) 1536 if (error)
1565 return error; 1537 return error;
1566 1538
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index f25d98b87904..4f5e6e44ed83 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -14,20 +14,21 @@ struct gfs2_inode;
14struct gfs2_sbd; 14struct gfs2_sbd;
15struct shrink_control; 15struct shrink_control;
16 16
17#define NO_QUOTA_CHANGE ((u32)-1) 17#define NO_UID_QUOTA_CHANGE INVALID_UID
18#define NO_GID_QUOTA_CHANGE INVALID_GID
18 19
19extern int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid); 20extern int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid);
20extern void gfs2_quota_unhold(struct gfs2_inode *ip); 21extern void gfs2_quota_unhold(struct gfs2_inode *ip);
21 22
22extern int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid); 23extern int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid);
23extern void gfs2_quota_unlock(struct gfs2_inode *ip); 24extern void gfs2_quota_unlock(struct gfs2_inode *ip);
24 25
25extern int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid); 26extern int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid);
26extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change, 27extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
27 u32 uid, u32 gid); 28 kuid_t uid, kgid_t gid);
28 29
29extern int gfs2_quota_sync(struct super_block *sb, int type); 30extern int gfs2_quota_sync(struct super_block *sb, int type);
30extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id); 31extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid);
31 32
32extern int gfs2_quota_init(struct gfs2_sbd *sdp); 33extern int gfs2_quota_init(struct gfs2_sbd *sdp);
33extern void gfs2_quota_cleanup(struct gfs2_sbd *sdp); 34extern void gfs2_quota_cleanup(struct gfs2_sbd *sdp);
@@ -41,7 +42,7 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
41 int ret; 42 int ret;
42 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) 43 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
43 return 0; 44 return 0;
44 ret = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE); 45 ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
45 if (ret) 46 if (ret)
46 return ret; 47 return ret;
47 if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON) 48 if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 04af1cf7ae34..d1f51fd73f86 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1323,7 +1323,7 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
1323 if (ret == 0) { 1323 if (ret == 0) {
1324 bh = rgd->rd_bits[0].bi_bh; 1324 bh = rgd->rd_bits[0].bi_bh;
1325 rgd->rd_flags |= GFS2_RGF_TRIMMED; 1325 rgd->rd_flags |= GFS2_RGF_TRIMMED;
1326 gfs2_trans_add_bh(rgd->rd_gl, bh, 1); 1326 gfs2_trans_add_meta(rgd->rd_gl, bh);
1327 gfs2_rgrp_out(rgd, bh->b_data); 1327 gfs2_rgrp_out(rgd, bh->b_data);
1328 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, bh->b_data); 1328 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, bh->b_data);
1329 gfs2_trans_end(sdp); 1329 gfs2_trans_end(sdp);
@@ -1968,14 +1968,14 @@ static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
1968 1968
1969 *n = 1; 1969 *n = 1;
1970 block = gfs2_rbm_to_block(rbm); 1970 block = gfs2_rbm_to_block(rbm);
1971 gfs2_trans_add_bh(rbm->rgd->rd_gl, rbm->bi->bi_bh, 1); 1971 gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm->bi->bi_bh);
1972 gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED); 1972 gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
1973 block++; 1973 block++;
1974 while (*n < elen) { 1974 while (*n < elen) {
1975 ret = gfs2_rbm_from_block(&pos, block); 1975 ret = gfs2_rbm_from_block(&pos, block);
1976 if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE) 1976 if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE)
1977 break; 1977 break;
1978 gfs2_trans_add_bh(pos.rgd->rd_gl, pos.bi->bi_bh, 1); 1978 gfs2_trans_add_meta(pos.rgd->rd_gl, pos.bi->bi_bh);
1979 gfs2_setbit(&pos, true, GFS2_BLKST_USED); 1979 gfs2_setbit(&pos, true, GFS2_BLKST_USED);
1980 (*n)++; 1980 (*n)++;
1981 block++; 1981 block++;
@@ -2014,7 +2014,7 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
2014 rbm.bi->bi_bh->b_data + rbm.bi->bi_offset, 2014 rbm.bi->bi_bh->b_data + rbm.bi->bi_offset,
2015 rbm.bi->bi_len); 2015 rbm.bi->bi_len);
2016 } 2016 }
2017 gfs2_trans_add_bh(rbm.rgd->rd_gl, rbm.bi->bi_bh, 1); 2017 gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.bi->bi_bh);
2018 gfs2_setbit(&rbm, false, new_state); 2018 gfs2_setbit(&rbm, false, new_state);
2019 } 2019 }
2020 2020
@@ -2157,7 +2157,7 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
2157 if (error == 0) { 2157 if (error == 0) {
2158 struct gfs2_dinode *di = 2158 struct gfs2_dinode *di =
2159 (struct gfs2_dinode *)dibh->b_data; 2159 (struct gfs2_dinode *)dibh->b_data;
2160 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 2160 gfs2_trans_add_meta(ip->i_gl, dibh);
2161 di->di_goal_meta = di->di_goal_data = 2161 di->di_goal_meta = di->di_goal_data =
2162 cpu_to_be64(ip->i_goal); 2162 cpu_to_be64(ip->i_goal);
2163 brelse(dibh); 2163 brelse(dibh);
@@ -2176,7 +2176,7 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
2176 *generation = rbm.rgd->rd_igeneration++; 2176 *generation = rbm.rgd->rd_igeneration++;
2177 } 2177 }
2178 2178
2179 gfs2_trans_add_bh(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh, 1); 2179 gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
2180 gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data); 2180 gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
2181 gfs2_rgrp_ondisk2lvb(rbm.rgd->rd_rgl, rbm.rgd->rd_bits[0].bi_bh->b_data); 2181 gfs2_rgrp_ondisk2lvb(rbm.rgd->rd_rgl, rbm.rgd->rd_bits[0].bi_bh->b_data);
2182 2182
@@ -2223,7 +2223,7 @@ void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
2223 trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE); 2223 trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
2224 rgd->rd_free += blen; 2224 rgd->rd_free += blen;
2225 rgd->rd_flags &= ~GFS2_RGF_TRIMMED; 2225 rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
2226 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); 2226 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
2227 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); 2227 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
2228 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data); 2228 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
2229 2229
@@ -2260,7 +2260,7 @@ void gfs2_unlink_di(struct inode *inode)
2260 if (!rgd) 2260 if (!rgd)
2261 return; 2261 return;
2262 trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED); 2262 trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
2263 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); 2263 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
2264 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); 2264 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
2265 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data); 2265 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
2266 update_rgrp_lvb_unlinked(rgd, 1); 2266 update_rgrp_lvb_unlinked(rgd, 1);
@@ -2281,7 +2281,7 @@ static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
2281 rgd->rd_dinodes--; 2281 rgd->rd_dinodes--;
2282 rgd->rd_free++; 2282 rgd->rd_free++;
2283 2283
2284 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); 2284 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
2285 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); 2285 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
2286 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data); 2286 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
2287 update_rgrp_lvb_unlinked(rgd, -1); 2287 update_rgrp_lvb_unlinked(rgd, -1);
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index d6488674d916..cab77b8ba84f 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -500,7 +500,7 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
500 if (error) 500 if (error)
501 return; 501 return;
502 502
503 gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1); 503 gfs2_trans_add_meta(l_ip->i_gl, l_bh);
504 504
505 spin_lock(&sdp->sd_statfs_spin); 505 spin_lock(&sdp->sd_statfs_spin);
506 l_sc->sc_total += total; 506 l_sc->sc_total += total;
@@ -528,7 +528,7 @@ void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
528 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; 528 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
529 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; 529 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
530 530
531 gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1); 531 gfs2_trans_add_meta(l_ip->i_gl, l_bh);
532 532
533 spin_lock(&sdp->sd_statfs_spin); 533 spin_lock(&sdp->sd_statfs_spin);
534 m_sc->sc_total += l_sc->sc_total; 534 m_sc->sc_total += l_sc->sc_total;
@@ -539,7 +539,7 @@ void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
539 0, sizeof(struct gfs2_statfs_change)); 539 0, sizeof(struct gfs2_statfs_change));
540 spin_unlock(&sdp->sd_statfs_spin); 540 spin_unlock(&sdp->sd_statfs_spin);
541 541
542 gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1); 542 gfs2_trans_add_meta(m_ip->i_gl, m_bh);
543 gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode)); 543 gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
544} 544}
545 545
@@ -663,54 +663,6 @@ out:
663 return error; 663 return error;
664} 664}
665 665
666/**
667 * gfs2_freeze_fs - freezes the file system
668 * @sdp: the file system
669 *
670 * This function flushes data and meta data for all machines by
671 * acquiring the transaction log exclusively. All journals are
672 * ensured to be in a clean state as well.
673 *
674 * Returns: errno
675 */
676
677int gfs2_freeze_fs(struct gfs2_sbd *sdp)
678{
679 int error = 0;
680
681 mutex_lock(&sdp->sd_freeze_lock);
682
683 if (!sdp->sd_freeze_count++) {
684 error = gfs2_lock_fs_check_clean(sdp, &sdp->sd_freeze_gh);
685 if (error)
686 sdp->sd_freeze_count--;
687 }
688
689 mutex_unlock(&sdp->sd_freeze_lock);
690
691 return error;
692}
693
694/**
695 * gfs2_unfreeze_fs - unfreezes the file system
696 * @sdp: the file system
697 *
698 * This function allows the file system to proceed by unlocking
699 * the exclusively held transaction lock. Other GFS2 nodes are
700 * now free to acquire the lock shared and go on with their lives.
701 *
702 */
703
704void gfs2_unfreeze_fs(struct gfs2_sbd *sdp)
705{
706 mutex_lock(&sdp->sd_freeze_lock);
707
708 if (sdp->sd_freeze_count && !--sdp->sd_freeze_count)
709 gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
710
711 mutex_unlock(&sdp->sd_freeze_lock);
712}
713
714void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf) 666void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
715{ 667{
716 struct gfs2_dinode *str = buf; 668 struct gfs2_dinode *str = buf;
@@ -721,8 +673,8 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
721 str->di_num.no_addr = cpu_to_be64(ip->i_no_addr); 673 str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
722 str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino); 674 str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
723 str->di_mode = cpu_to_be32(ip->i_inode.i_mode); 675 str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
724 str->di_uid = cpu_to_be32(ip->i_inode.i_uid); 676 str->di_uid = cpu_to_be32(i_uid_read(&ip->i_inode));
725 str->di_gid = cpu_to_be32(ip->i_inode.i_gid); 677 str->di_gid = cpu_to_be32(i_gid_read(&ip->i_inode));
726 str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink); 678 str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
727 str->di_size = cpu_to_be64(i_size_read(&ip->i_inode)); 679 str->di_size = cpu_to_be64(i_size_read(&ip->i_inode));
728 str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode)); 680 str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
@@ -824,7 +776,7 @@ static void gfs2_dirty_inode(struct inode *inode, int flags)
824 776
825 ret = gfs2_meta_inode_buffer(ip, &bh); 777 ret = gfs2_meta_inode_buffer(ip, &bh);
826 if (ret == 0) { 778 if (ret == 0) {
827 gfs2_trans_add_bh(ip->i_gl, bh, 1); 779 gfs2_trans_add_meta(ip->i_gl, bh);
828 gfs2_dinode_out(ip, bh->b_data); 780 gfs2_dinode_out(ip, bh->b_data);
829 brelse(bh); 781 brelse(bh);
830 } 782 }
@@ -888,13 +840,6 @@ static void gfs2_put_super(struct super_block *sb)
888 int error; 840 int error;
889 struct gfs2_jdesc *jd; 841 struct gfs2_jdesc *jd;
890 842
891 /* Unfreeze the filesystem, if we need to */
892
893 mutex_lock(&sdp->sd_freeze_lock);
894 if (sdp->sd_freeze_count)
895 gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
896 mutex_unlock(&sdp->sd_freeze_lock);
897
898 /* No more recovery requests */ 843 /* No more recovery requests */
899 set_bit(SDF_NORECOVERY, &sdp->sd_flags); 844 set_bit(SDF_NORECOVERY, &sdp->sd_flags);
900 smp_mb(); 845 smp_mb();
@@ -985,7 +930,7 @@ static int gfs2_freeze(struct super_block *sb)
985 return -EINVAL; 930 return -EINVAL;
986 931
987 for (;;) { 932 for (;;) {
988 error = gfs2_freeze_fs(sdp); 933 error = gfs2_lock_fs_check_clean(sdp, &sdp->sd_freeze_gh);
989 if (!error) 934 if (!error)
990 break; 935 break;
991 936
@@ -1013,7 +958,9 @@ static int gfs2_freeze(struct super_block *sb)
1013 958
1014static int gfs2_unfreeze(struct super_block *sb) 959static int gfs2_unfreeze(struct super_block *sb)
1015{ 960{
1016 gfs2_unfreeze_fs(sb->s_fs_info); 961 struct gfs2_sbd *sdp = sb->s_fs_info;
962
963 gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
1017 return 0; 964 return 0;
1018} 965}
1019 966
@@ -1429,7 +1376,7 @@ static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
1429 if (error) 1376 if (error)
1430 return error; 1377 return error;
1431 1378
1432 error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE); 1379 error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
1433 if (error) 1380 if (error)
1434 return error; 1381 return error;
1435 1382
@@ -1577,6 +1524,7 @@ out:
1577 /* Case 3 starts here */ 1524 /* Case 3 starts here */
1578 truncate_inode_pages(&inode->i_data, 0); 1525 truncate_inode_pages(&inode->i_data, 0);
1579 gfs2_rs_delete(ip); 1526 gfs2_rs_delete(ip);
1527 gfs2_ordered_del_inode(ip);
1580 clear_inode(inode); 1528 clear_inode(inode);
1581 gfs2_dir_hash_inval(ip); 1529 gfs2_dir_hash_inval(ip);
1582 ip->i_gl->gl_object = NULL; 1530 ip->i_gl->gl_object = NULL;
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index a0464680af0b..90e3322ffa10 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -46,9 +46,6 @@ extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
46 struct buffer_head *l_bh); 46 struct buffer_head *l_bh);
47extern int gfs2_statfs_sync(struct super_block *sb, int type); 47extern int gfs2_statfs_sync(struct super_block *sb, int type);
48 48
49extern int gfs2_freeze_fs(struct gfs2_sbd *sdp);
50extern void gfs2_unfreeze_fs(struct gfs2_sbd *sdp);
51
52extern struct file_system_type gfs2_fs_type; 49extern struct file_system_type gfs2_fs_type;
53extern struct file_system_type gfs2meta_fs_type; 50extern struct file_system_type gfs2meta_fs_type;
54extern const struct export_operations gfs2_export_ops; 51extern const struct export_operations gfs2_export_ops;
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 0acbe2ff1e5d..aa5c48044966 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -91,19 +91,15 @@ static ssize_t uuid_show(struct gfs2_sbd *sdp, char *buf)
91 91
92static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf) 92static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
93{ 93{
94 unsigned int count; 94 struct super_block *sb = sdp->sd_vfs;
95 95 int frozen = (sb->s_writers.frozen == SB_UNFROZEN) ? 0 : 1;
96 mutex_lock(&sdp->sd_freeze_lock);
97 count = sdp->sd_freeze_count;
98 mutex_unlock(&sdp->sd_freeze_lock);
99 96
100 return snprintf(buf, PAGE_SIZE, "%u\n", count); 97 return snprintf(buf, PAGE_SIZE, "%u\n", frozen);
101} 98}
102 99
103static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len) 100static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
104{ 101{
105 ssize_t ret = len; 102 int error;
106 int error = 0;
107 int n = simple_strtol(buf, NULL, 0); 103 int n = simple_strtol(buf, NULL, 0);
108 104
109 if (!capable(CAP_SYS_ADMIN)) 105 if (!capable(CAP_SYS_ADMIN))
@@ -111,19 +107,21 @@ static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
111 107
112 switch (n) { 108 switch (n) {
113 case 0: 109 case 0:
114 gfs2_unfreeze_fs(sdp); 110 error = thaw_super(sdp->sd_vfs);
115 break; 111 break;
116 case 1: 112 case 1:
117 error = gfs2_freeze_fs(sdp); 113 error = freeze_super(sdp->sd_vfs);
118 break; 114 break;
119 default: 115 default:
120 ret = -EINVAL; 116 return -EINVAL;
121 } 117 }
122 118
123 if (error) 119 if (error) {
124 fs_warn(sdp, "freeze %d error %d", n, error); 120 fs_warn(sdp, "freeze %d error %d", n, error);
121 return error;
122 }
125 123
126 return ret; 124 return len;
127} 125}
128 126
129static ssize_t withdraw_show(struct gfs2_sbd *sdp, char *buf) 127static ssize_t withdraw_show(struct gfs2_sbd *sdp, char *buf)
@@ -175,6 +173,7 @@ static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf,
175static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf, 173static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
176 size_t len) 174 size_t len)
177{ 175{
176 struct kqid qid;
178 int error; 177 int error;
179 u32 id; 178 u32 id;
180 179
@@ -183,13 +182,18 @@ static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
183 182
184 id = simple_strtoul(buf, NULL, 0); 183 id = simple_strtoul(buf, NULL, 0);
185 184
186 error = gfs2_quota_refresh(sdp, 1, id); 185 qid = make_kqid(current_user_ns(), USRQUOTA, id);
186 if (!qid_valid(qid))
187 return -EINVAL;
188
189 error = gfs2_quota_refresh(sdp, qid);
187 return error ? error : len; 190 return error ? error : len;
188} 191}
189 192
190static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf, 193static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
191 size_t len) 194 size_t len)
192{ 195{
196 struct kqid qid;
193 int error; 197 int error;
194 u32 id; 198 u32 id;
195 199
@@ -198,7 +202,11 @@ static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
198 202
199 id = simple_strtoul(buf, NULL, 0); 203 id = simple_strtoul(buf, NULL, 0);
200 204
201 error = gfs2_quota_refresh(sdp, 0, id); 205 qid = make_kqid(current_user_ns(), GRPQUOTA, id);
206 if (!qid_valid(qid))
207 return -EINVAL;
208
209 error = gfs2_quota_refresh(sdp, qid);
202 return error ? error : len; 210 return error ? error : len;
203} 211}
204 212
@@ -332,6 +340,28 @@ static ssize_t block_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
332 return ret; 340 return ret;
333} 341}
334 342
343static ssize_t wdack_show(struct gfs2_sbd *sdp, char *buf)
344{
345 int val = completion_done(&sdp->sd_wdack) ? 1 : 0;
346
347 return sprintf(buf, "%d\n", val);
348}
349
350static ssize_t wdack_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
351{
352 ssize_t ret = len;
353 int val;
354
355 val = simple_strtol(buf, NULL, 0);
356
357 if ((val == 1) &&
358 !strcmp(sdp->sd_lockstruct.ls_ops->lm_proto_name, "lock_dlm"))
359 complete(&sdp->sd_wdack);
360 else
361 ret = -EINVAL;
362 return ret;
363}
364
335static ssize_t lkfirst_show(struct gfs2_sbd *sdp, char *buf) 365static ssize_t lkfirst_show(struct gfs2_sbd *sdp, char *buf)
336{ 366{
337 struct lm_lockstruct *ls = &sdp->sd_lockstruct; 367 struct lm_lockstruct *ls = &sdp->sd_lockstruct;
@@ -463,7 +493,7 @@ static struct gfs2_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store)
463 493
464GDLM_ATTR(proto_name, 0444, proto_name_show, NULL); 494GDLM_ATTR(proto_name, 0444, proto_name_show, NULL);
465GDLM_ATTR(block, 0644, block_show, block_store); 495GDLM_ATTR(block, 0644, block_show, block_store);
466GDLM_ATTR(withdraw, 0644, withdraw_show, withdraw_store); 496GDLM_ATTR(withdraw, 0644, wdack_show, wdack_store);
467GDLM_ATTR(jid, 0644, jid_show, jid_store); 497GDLM_ATTR(jid, 0644, jid_show, jid_store);
468GDLM_ATTR(first, 0644, lkfirst_show, lkfirst_store); 498GDLM_ATTR(first, 0644, lkfirst_show, lkfirst_store);
469GDLM_ATTR(first_done, 0444, first_done_show, NULL); 499GDLM_ATTR(first_done, 0444, first_done_show, NULL);
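
Illustrative sketch (not part of the patch above): the reworked freeze sysfs file maps a write of "1" to freeze_super() and "0" to thaw_super(), reports any VFS error back to the writer, and the show side derives a 0/1 value from the superblock's frozen state. The stand-alone C model below mirrors the store-side parsing and error propagation; the helpers and state variable are stand-ins, not the kernel functions.

/*
 * Minimal user-space sketch of the freeze_store() semantics.
 * freeze_super_sketch()/thaw_super_sketch() stand in for the VFS calls.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int fs_frozen;                 /* stand-in for sb->s_writers.frozen */

static int freeze_super_sketch(void)  { fs_frozen = 1; return 0; }
static int thaw_super_sketch(void)    { fs_frozen = 0; return 0; }

/* Mirrors freeze_store(): returns the consumed length or a negative errno. */
static long freeze_store_sketch(const char *buf, long len)
{
	int error;
	long n = strtol(buf, NULL, 0);

	switch (n) {
	case 0:
		error = thaw_super_sketch();
		break;
	case 1:
		error = freeze_super_sketch();
		break;
	default:
		return -EINVAL;
	}
	return error ? error : len;
}

int main(void)
{
	printf("write \"1\": %ld (frozen=%d)\n", freeze_store_sketch("1", 2), fs_frozen);
	printf("write \"0\": %ld (frozen=%d)\n", freeze_store_sketch("0", 2), fs_frozen);
	printf("write \"7\": %ld\n", freeze_store_sketch("7", 2));
	return 0;
}
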
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index 413627072f36..88162fae27a5 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -18,6 +18,7 @@
18#include "gfs2.h" 18#include "gfs2.h"
19#include "incore.h" 19#include "incore.h"
20#include "glock.h" 20#include "glock.h"
21#include "inode.h"
21#include "log.h" 22#include "log.h"
22#include "lops.h" 23#include "lops.h"
23#include "meta_io.h" 24#include "meta_io.h"
@@ -142,44 +143,143 @@ void gfs2_trans_end(struct gfs2_sbd *sdp)
142 sb_end_intwrite(sdp->sd_vfs); 143 sb_end_intwrite(sdp->sd_vfs);
143} 144}
144 145
146static struct gfs2_bufdata *gfs2_alloc_bufdata(struct gfs2_glock *gl,
147 struct buffer_head *bh,
148 const struct gfs2_log_operations *lops)
149{
150 struct gfs2_bufdata *bd;
151
152 bd = kmem_cache_zalloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL);
153 bd->bd_bh = bh;
154 bd->bd_gl = gl;
155 bd->bd_ops = lops;
156 INIT_LIST_HEAD(&bd->bd_list);
157 bh->b_private = bd;
158 return bd;
159}
160
145/** 161/**
146 * gfs2_trans_add_bh - Add a to-be-modified buffer to the current transaction 162 * gfs2_trans_add_data - Add a databuf to the transaction.
147 * @gl: the glock the buffer belongs to 163 * @gl: The inode glock associated with the buffer
148 * @bh: The buffer to add 164 * @bh: The buffer to add
149 * @meta: True in the case of adding metadata
150 * 165 *
166 * This is used in two distinct cases:
167 * i) In ordered write mode
168 * We put the data buffer on a list so that we can ensure that it's
169 * synced to disk at the right time
170 * ii) In journaled data mode
171 * We need to journal the data block in the same way as metadata in
172 * the functions above. The difference is that here we have a tag
173 * which is two __be64's being the block number (as per metadata)
174 * and a flag which says whether the data block needs escaping or
175 * not. This means we need a new log entry for each 251 or so data
176 * blocks, which isn't an enormous overhead but twice as much as
177 * for normal metadata blocks.
151 */ 178 */
179void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh)
180{
181 struct gfs2_trans *tr = current->journal_info;
182 struct gfs2_sbd *sdp = gl->gl_sbd;
183 struct address_space *mapping = bh->b_page->mapping;
184 struct gfs2_inode *ip = GFS2_I(mapping->host);
185 struct gfs2_bufdata *bd;
152 186
153void gfs2_trans_add_bh(struct gfs2_glock *gl, struct buffer_head *bh, int meta) 187 if (!gfs2_is_jdata(ip)) {
188 gfs2_ordered_add_inode(ip);
189 return;
190 }
191
192 lock_buffer(bh);
193 gfs2_log_lock(sdp);
194 bd = bh->b_private;
195 if (bd == NULL) {
196 gfs2_log_unlock(sdp);
197 unlock_buffer(bh);
198 if (bh->b_private == NULL)
199 bd = gfs2_alloc_bufdata(gl, bh, &gfs2_databuf_lops);
200 lock_buffer(bh);
201 gfs2_log_lock(sdp);
202 }
203 gfs2_assert(sdp, bd->bd_gl == gl);
204 tr->tr_touched = 1;
205 if (list_empty(&bd->bd_list)) {
206 set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
207 set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
208 gfs2_pin(sdp, bd->bd_bh);
209 tr->tr_num_databuf_new++;
210 sdp->sd_log_num_databuf++;
211 list_add_tail(&bd->bd_list, &sdp->sd_log_le_databuf);
212 }
213 gfs2_log_unlock(sdp);
214 unlock_buffer(bh);
215}
216
217static void meta_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
154{ 218{
219 struct gfs2_meta_header *mh;
220 struct gfs2_trans *tr;
221
222 tr = current->journal_info;
223 tr->tr_touched = 1;
224 if (!list_empty(&bd->bd_list))
225 return;
226 set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
227 set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
228 mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
229 if (unlikely(mh->mh_magic != cpu_to_be32(GFS2_MAGIC))) {
230 printk(KERN_ERR
231 "Attempting to add uninitialised block to journal (inplace block=%lld)\n",
232 (unsigned long long)bd->bd_bh->b_blocknr);
233 BUG();
234 }
235 gfs2_pin(sdp, bd->bd_bh);
236 mh->__pad0 = cpu_to_be64(0);
237 mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
238 sdp->sd_log_num_buf++;
239 list_add(&bd->bd_list, &sdp->sd_log_le_buf);
240 tr->tr_num_buf_new++;
241}
242
243void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
244{
245
155 struct gfs2_sbd *sdp = gl->gl_sbd; 246 struct gfs2_sbd *sdp = gl->gl_sbd;
156 struct gfs2_bufdata *bd; 247 struct gfs2_bufdata *bd;
157 248
158 lock_buffer(bh); 249 lock_buffer(bh);
159 gfs2_log_lock(sdp); 250 gfs2_log_lock(sdp);
160 bd = bh->b_private; 251 bd = bh->b_private;
161 if (bd) 252 if (bd == NULL) {
162 gfs2_assert(sdp, bd->bd_gl == gl);
163 else {
164 gfs2_log_unlock(sdp); 253 gfs2_log_unlock(sdp);
165 unlock_buffer(bh); 254 unlock_buffer(bh);
166 gfs2_attach_bufdata(gl, bh, meta); 255 lock_page(bh->b_page);
167 bd = bh->b_private; 256 if (bh->b_private == NULL)
257 bd = gfs2_alloc_bufdata(gl, bh, &gfs2_buf_lops);
258 unlock_page(bh->b_page);
168 lock_buffer(bh); 259 lock_buffer(bh);
169 gfs2_log_lock(sdp); 260 gfs2_log_lock(sdp);
170 } 261 }
171 lops_add(sdp, bd); 262 gfs2_assert(sdp, bd->bd_gl == gl);
263 meta_lo_add(sdp, bd);
172 gfs2_log_unlock(sdp); 264 gfs2_log_unlock(sdp);
173 unlock_buffer(bh); 265 unlock_buffer(bh);
174} 266}
175 267
176void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) 268void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
177{ 269{
270 struct gfs2_glock *gl = bd->bd_gl;
271 struct gfs2_trans *tr = current->journal_info;
272
178 BUG_ON(!list_empty(&bd->bd_list)); 273 BUG_ON(!list_empty(&bd->bd_list));
179 BUG_ON(!list_empty(&bd->bd_ail_st_list)); 274 BUG_ON(!list_empty(&bd->bd_ail_st_list));
180 BUG_ON(!list_empty(&bd->bd_ail_gl_list)); 275 BUG_ON(!list_empty(&bd->bd_ail_gl_list));
181 lops_init_le(bd, &gfs2_revoke_lops); 276 bd->bd_ops = &gfs2_revoke_lops;
182 lops_add(sdp, bd); 277 tr->tr_touched = 1;
278 tr->tr_num_revoke++;
279 sdp->sd_log_num_revoke++;
280 atomic_inc(&gl->gl_revokes);
281 set_bit(GLF_LFLUSH, &gl->gl_flags);
282 list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
183} 283}
184 284
185void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len) 285void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len)
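
Illustrative sketch (not part of the patch above): gfs2_trans_add_bh() is split so that callers state whether a buffer carries data or metadata. In ordered write mode a data buffer only queues its inode for ordered write-out, while journaled data and all metadata buffers are attached to the journal. The stand-alone C model below mirrors that dispatch with simplified stand-in types; none of the names are kernel APIs.

/*
 * Minimal user-space model of the data/meta dispatch introduced by
 * gfs2_trans_add_data()/gfs2_trans_add_meta(). Counters stand in for the
 * ordered-inode and journal buffer lists.
 */
#include <stdbool.h>
#include <stdio.h>

struct inode_sketch {
	bool jdata;              /* journaled-data mode? */
	bool on_ordered_list;
};

struct buf_sketch {
	struct inode_sketch *inode;
	bool in_journal;
};

static unsigned int ordered_inodes, journaled_bufs;

static void ordered_add_inode(struct inode_sketch *ip)
{
	if (!ip->on_ordered_list) {      /* like gfs2_ordered_add_inode() */
		ip->on_ordered_list = true;
		ordered_inodes++;
	}
}

static void journal_buf(struct buf_sketch *bh)
{
	if (!bh->in_journal) {           /* like pinning and listing a bufdata */
		bh->in_journal = true;
		journaled_bufs++;
	}
}

static void trans_add_data(struct buf_sketch *bh)
{
	if (!bh->inode->jdata)           /* ordered write mode */
		ordered_add_inode(bh->inode);
	else                             /* journaled data mode */
		journal_buf(bh);
}

static void trans_add_meta(struct buf_sketch *bh)
{
	journal_buf(bh);                 /* metadata is always journaled */
}

int main(void)
{
	struct inode_sketch ordered_ip = { false, false }, jdata_ip = { true, false };
	struct buf_sketch d1 = { &ordered_ip, false }, d2 = { &jdata_ip, false };
	struct buf_sketch m1 = { &ordered_ip, false };

	trans_add_data(&d1);
	trans_add_data(&d2);
	trans_add_meta(&m1);
	printf("ordered inodes: %u, journaled buffers: %u\n",
	       ordered_inodes, journaled_bufs);
	return 0;
}
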
diff --git a/fs/gfs2/trans.h b/fs/gfs2/trans.h
index bf2ae9aeee7a..1e6e7da25a17 100644
--- a/fs/gfs2/trans.h
+++ b/fs/gfs2/trans.h
@@ -39,7 +39,8 @@ extern int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
39 unsigned int revokes); 39 unsigned int revokes);
40 40
41extern void gfs2_trans_end(struct gfs2_sbd *sdp); 41extern void gfs2_trans_end(struct gfs2_sbd *sdp);
42extern void gfs2_trans_add_bh(struct gfs2_glock *gl, struct buffer_head *bh, int meta); 42extern void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh);
43extern void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh);
43extern void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd); 44extern void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
44extern void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len); 45extern void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len);
45 46
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index f00d7c5744f6..6402fb69d71b 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -54,6 +54,9 @@ int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...)
54 54
55 kobject_uevent(&sdp->sd_kobj, KOBJ_OFFLINE); 55 kobject_uevent(&sdp->sd_kobj, KOBJ_OFFLINE);
56 56
57 if (!strcmp(sdp->sd_lockstruct.ls_ops->lm_proto_name, "lock_dlm"))
58 wait_for_completion(&sdp->sd_wdack);
59
57 if (lm->lm_unmount) { 60 if (lm->lm_unmount) {
58 fs_err(sdp, "telling LM to unmount\n"); 61 fs_err(sdp, "telling LM to unmount\n");
59 lm->lm_unmount(sdp); 62 lm->lm_unmount(sdp);
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 76c144b3c9bb..ecd37f30ab91 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -270,7 +270,7 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
270 if (error) 270 if (error)
271 goto out_gunlock; 271 goto out_gunlock;
272 272
273 gfs2_trans_add_bh(ip->i_gl, bh, 1); 273 gfs2_trans_add_meta(ip->i_gl, bh);
274 274
275 dataptrs = GFS2_EA2DATAPTRS(ea); 275 dataptrs = GFS2_EA2DATAPTRS(ea);
276 for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) { 276 for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
@@ -309,7 +309,7 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
309 error = gfs2_meta_inode_buffer(ip, &dibh); 309 error = gfs2_meta_inode_buffer(ip, &dibh);
310 if (!error) { 310 if (!error) {
311 ip->i_inode.i_ctime = CURRENT_TIME; 311 ip->i_inode.i_ctime = CURRENT_TIME;
312 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 312 gfs2_trans_add_meta(ip->i_gl, dibh);
313 gfs2_dinode_out(ip, dibh->b_data); 313 gfs2_dinode_out(ip, dibh->b_data);
314 brelse(dibh); 314 brelse(dibh);
315 } 315 }
@@ -331,7 +331,7 @@ static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
331 if (error) 331 if (error)
332 return error; 332 return error;
333 333
334 error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE); 334 error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
335 if (error) 335 if (error)
336 goto out_alloc; 336 goto out_alloc;
337 337
@@ -509,7 +509,7 @@ static int gfs2_iter_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
509 } 509 }
510 510
511 if (din) { 511 if (din) {
512 gfs2_trans_add_bh(ip->i_gl, bh[x], 1); 512 gfs2_trans_add_meta(ip->i_gl, bh[x]);
513 memcpy(pos, din, cp_size); 513 memcpy(pos, din, cp_size);
514 din += sdp->sd_jbsize; 514 din += sdp->sd_jbsize;
515 } 515 }
@@ -629,7 +629,7 @@ static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
629 return error; 629 return error;
630 gfs2_trans_add_unrevoke(sdp, block, 1); 630 gfs2_trans_add_unrevoke(sdp, block, 1);
631 *bhp = gfs2_meta_new(ip->i_gl, block); 631 *bhp = gfs2_meta_new(ip->i_gl, block);
632 gfs2_trans_add_bh(ip->i_gl, *bhp, 1); 632 gfs2_trans_add_meta(ip->i_gl, *bhp);
633 gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA); 633 gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
634 gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header)); 634 gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header));
635 635
@@ -691,7 +691,7 @@ static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
691 return error; 691 return error;
692 gfs2_trans_add_unrevoke(sdp, block, 1); 692 gfs2_trans_add_unrevoke(sdp, block, 1);
693 bh = gfs2_meta_new(ip->i_gl, block); 693 bh = gfs2_meta_new(ip->i_gl, block);
694 gfs2_trans_add_bh(ip->i_gl, bh, 1); 694 gfs2_trans_add_meta(ip->i_gl, bh);
695 gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED); 695 gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);
696 696
697 gfs2_add_inode_blocks(&ip->i_inode, 1); 697 gfs2_add_inode_blocks(&ip->i_inode, 1);
@@ -751,7 +751,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
751 error = gfs2_meta_inode_buffer(ip, &dibh); 751 error = gfs2_meta_inode_buffer(ip, &dibh);
752 if (!error) { 752 if (!error) {
753 ip->i_inode.i_ctime = CURRENT_TIME; 753 ip->i_inode.i_ctime = CURRENT_TIME;
754 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 754 gfs2_trans_add_meta(ip->i_gl, dibh);
755 gfs2_dinode_out(ip, dibh->b_data); 755 gfs2_dinode_out(ip, dibh->b_data);
756 brelse(dibh); 756 brelse(dibh);
757 } 757 }
@@ -834,7 +834,7 @@ static void ea_set_remove_stuffed(struct gfs2_inode *ip,
834 struct gfs2_ea_header *prev = el->el_prev; 834 struct gfs2_ea_header *prev = el->el_prev;
835 u32 len; 835 u32 len;
836 836
837 gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1); 837 gfs2_trans_add_meta(ip->i_gl, el->el_bh);
838 838
839 if (!prev || !GFS2_EA_IS_STUFFED(ea)) { 839 if (!prev || !GFS2_EA_IS_STUFFED(ea)) {
840 ea->ea_type = GFS2_EATYPE_UNUSED; 840 ea->ea_type = GFS2_EATYPE_UNUSED;
@@ -872,7 +872,7 @@ static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
872 if (error) 872 if (error)
873 return error; 873 return error;
874 874
875 gfs2_trans_add_bh(ip->i_gl, bh, 1); 875 gfs2_trans_add_meta(ip->i_gl, bh);
876 876
877 if (es->ea_split) 877 if (es->ea_split)
878 ea = ea_split_ea(ea); 878 ea = ea_split_ea(ea);
@@ -886,7 +886,7 @@ static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
886 if (error) 886 if (error)
887 goto out; 887 goto out;
888 ip->i_inode.i_ctime = CURRENT_TIME; 888 ip->i_inode.i_ctime = CURRENT_TIME;
889 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 889 gfs2_trans_add_meta(ip->i_gl, dibh);
890 gfs2_dinode_out(ip, dibh->b_data); 890 gfs2_dinode_out(ip, dibh->b_data);
891 brelse(dibh); 891 brelse(dibh);
892out: 892out:
@@ -901,7 +901,7 @@ static int ea_set_simple_alloc(struct gfs2_inode *ip,
901 struct gfs2_ea_header *ea = es->es_ea; 901 struct gfs2_ea_header *ea = es->es_ea;
902 int error; 902 int error;
903 903
904 gfs2_trans_add_bh(ip->i_gl, es->es_bh, 1); 904 gfs2_trans_add_meta(ip->i_gl, es->es_bh);
905 905
906 if (es->ea_split) 906 if (es->ea_split)
907 ea = ea_split_ea(ea); 907 ea = ea_split_ea(ea);
@@ -997,7 +997,7 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
997 goto out; 997 goto out;
998 } 998 }
999 999
1000 gfs2_trans_add_bh(ip->i_gl, indbh, 1); 1000 gfs2_trans_add_meta(ip->i_gl, indbh);
1001 } else { 1001 } else {
1002 u64 blk; 1002 u64 blk;
1003 unsigned int n = 1; 1003 unsigned int n = 1;
@@ -1006,7 +1006,7 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
1006 return error; 1006 return error;
1007 gfs2_trans_add_unrevoke(sdp, blk, 1); 1007 gfs2_trans_add_unrevoke(sdp, blk, 1);
1008 indbh = gfs2_meta_new(ip->i_gl, blk); 1008 indbh = gfs2_meta_new(ip->i_gl, blk);
1009 gfs2_trans_add_bh(ip->i_gl, indbh, 1); 1009 gfs2_trans_add_meta(ip->i_gl, indbh);
1010 gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN); 1010 gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
1011 gfs2_buffer_clear_tail(indbh, mh_size); 1011 gfs2_buffer_clear_tail(indbh, mh_size);
1012 1012
@@ -1092,7 +1092,7 @@ static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
1092 if (error) 1092 if (error)
1093 return error; 1093 return error;
1094 1094
1095 gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1); 1095 gfs2_trans_add_meta(ip->i_gl, el->el_bh);
1096 1096
1097 if (prev) { 1097 if (prev) {
1098 u32 len; 1098 u32 len;
@@ -1109,7 +1109,7 @@ static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
1109 error = gfs2_meta_inode_buffer(ip, &dibh); 1109 error = gfs2_meta_inode_buffer(ip, &dibh);
1110 if (!error) { 1110 if (!error) {
1111 ip->i_inode.i_ctime = CURRENT_TIME; 1111 ip->i_inode.i_ctime = CURRENT_TIME;
1112 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 1112 gfs2_trans_add_meta(ip->i_gl, dibh);
1113 gfs2_dinode_out(ip, dibh->b_data); 1113 gfs2_dinode_out(ip, dibh->b_data);
1114 brelse(dibh); 1114 brelse(dibh);
1115 } 1115 }
@@ -1265,7 +1265,7 @@ int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data)
1265 if (GFS2_EA_IS_STUFFED(el.el_ea)) { 1265 if (GFS2_EA_IS_STUFFED(el.el_ea)) {
1266 error = gfs2_trans_begin(sdp, RES_DINODE + RES_EATTR, 0); 1266 error = gfs2_trans_begin(sdp, RES_DINODE + RES_EATTR, 0);
1267 if (error == 0) { 1267 if (error == 0) {
1268 gfs2_trans_add_bh(ip->i_gl, el.el_bh, 1); 1268 gfs2_trans_add_meta(ip->i_gl, el.el_bh);
1269 memcpy(GFS2_EA2DATA(el.el_ea), data, 1269 memcpy(GFS2_EA2DATA(el.el_ea), data,
1270 GFS2_EA_DATA_LEN(el.el_ea)); 1270 GFS2_EA_DATA_LEN(el.el_ea));
1271 } 1271 }
@@ -1352,7 +1352,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
1352 if (error) 1352 if (error)
1353 goto out_gunlock; 1353 goto out_gunlock;
1354 1354
1355 gfs2_trans_add_bh(ip->i_gl, indbh, 1); 1355 gfs2_trans_add_meta(ip->i_gl, indbh);
1356 1356
1357 eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header)); 1357 eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
1358 bstart = 0; 1358 bstart = 0;
@@ -1384,7 +1384,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
1384 1384
1385 error = gfs2_meta_inode_buffer(ip, &dibh); 1385 error = gfs2_meta_inode_buffer(ip, &dibh);
1386 if (!error) { 1386 if (!error) {
1387 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 1387 gfs2_trans_add_meta(ip->i_gl, dibh);
1388 gfs2_dinode_out(ip, dibh->b_data); 1388 gfs2_dinode_out(ip, dibh->b_data);
1389 brelse(dibh); 1389 brelse(dibh);
1390 } 1390 }
@@ -1434,7 +1434,7 @@ static int ea_dealloc_block(struct gfs2_inode *ip)
1434 1434
1435 error = gfs2_meta_inode_buffer(ip, &dibh); 1435 error = gfs2_meta_inode_buffer(ip, &dibh);
1436 if (!error) { 1436 if (!error) {
1437 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 1437 gfs2_trans_add_meta(ip->i_gl, dibh);
1438 gfs2_dinode_out(ip, dibh->b_data); 1438 gfs2_dinode_out(ip, dibh->b_data);
1439 brelse(dibh); 1439 brelse(dibh);
1440 } 1440 }
@@ -1461,7 +1461,7 @@ int gfs2_ea_dealloc(struct gfs2_inode *ip)
1461 if (error) 1461 if (error)
1462 return error; 1462 return error;
1463 1463
1464 error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE); 1464 error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
1465 if (error) 1465 if (error)
1466 return error; 1466 return error;
1467 1467