author     Len Brown <len.brown@intel.com>   2010-08-15 01:06:31 -0400
committer  Len Brown <len.brown@intel.com>   2010-08-15 01:06:31 -0400
commit     95ee46aa8698f2000647dfb362400fadbb5807cf (patch)
tree       e5a05c7297f997e191c73091934e42e3195c0e40 /fs/gfs2
parent     cfa806f059801dbe7e435745eb2e187c8bfe1e7f (diff)
parent     92fa5bd9a946b6e7aab6764e7312e4e3d9bed295 (diff)
Merge branch 'linus' into release
Conflicts:
	drivers/acpi/debug.c
Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'fs/gfs2')
-rw-r--r--  fs/gfs2/Kconfig      |   1
-rw-r--r--  fs/gfs2/aops.c       |  19
-rw-r--r--  fs/gfs2/bmap.c       |  17
-rw-r--r--  fs/gfs2/bmap.h       |   2
-rw-r--r--  fs/gfs2/dir.c        |  11
-rw-r--r--  fs/gfs2/file.c       |   4
-rw-r--r--  fs/gfs2/glock.c      | 105
-rw-r--r--  fs/gfs2/incore.h     |   4
-rw-r--r--  fs/gfs2/inode.c      |  27
-rw-r--r--  fs/gfs2/log.c        |   4
-rw-r--r--  fs/gfs2/main.c       |  14
-rw-r--r--  fs/gfs2/meta_io.c    |   8
-rw-r--r--  fs/gfs2/ops_fstype.c |  37
-rw-r--r--  fs/gfs2/ops_inode.c  |  18
-rw-r--r--  fs/gfs2/quota.c      |  25
-rw-r--r--  fs/gfs2/recovery.c   |  54
-rw-r--r--  fs/gfs2/recovery.h   |   6
-rw-r--r--  fs/gfs2/super.c      |  52
-rw-r--r--  fs/gfs2/sys.c        |  60
-rw-r--r--  fs/gfs2/xattr.c      |  24
20 files changed, 292 insertions, 200 deletions
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index a47b43107112..cc9665522148 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -7,7 +7,6 @@ config GFS2_FS | |||
7 | select IP_SCTP if DLM_SCTP | 7 | select IP_SCTP if DLM_SCTP |
8 | select FS_POSIX_ACL | 8 | select FS_POSIX_ACL |
9 | select CRC32 | 9 | select CRC32 |
10 | select SLOW_WORK | ||
11 | select QUOTACTL | 10 | select QUOTACTL |
12 | help | 11 | help |
13 | A cluster filesystem. | 12 | A cluster filesystem. |
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 9f8b52500d63..194fe16d8418 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -136,10 +136,7 @@ static int gfs2_writeback_writepage(struct page *page, | |||
136 | if (ret <= 0) | 136 | if (ret <= 0) |
137 | return ret; | 137 | return ret; |
138 | 138 | ||
139 | ret = mpage_writepage(page, gfs2_get_block_noalloc, wbc); | 139 | return nobh_writepage(page, gfs2_get_block_noalloc, wbc); |
140 | if (ret == -EAGAIN) | ||
141 | ret = block_write_full_page(page, gfs2_get_block_noalloc, wbc); | ||
142 | return ret; | ||
143 | } | 140 | } |
144 | 141 | ||
145 | /** | 142 | /** |
@@ -637,9 +634,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping, | |||
637 | } | 634 | } |
638 | } | 635 | } |
639 | 636 | ||
640 | error = gfs2_write_alloc_required(ip, pos, len, &alloc_required); | 637 | alloc_required = gfs2_write_alloc_required(ip, pos, len); |
641 | if (error) | ||
642 | goto out_unlock; | ||
643 | 638 | ||
644 | if (alloc_required || gfs2_is_jdata(ip)) | 639 | if (alloc_required || gfs2_is_jdata(ip)) |
645 | gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks); | 640 | gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks); |
@@ -702,12 +697,12 @@ out: | |||
702 | page_cache_release(page); | 697 | page_cache_release(page); |
703 | 698 | ||
704 | /* | 699 | /* |
705 | * XXX(hch): the call below should probably be replaced with | 700 | * XXX(truncate): the call below should probably be replaced with |
706 | * a call to the gfs2-specific truncate blocks helper to actually | 701 | * a call to the gfs2-specific truncate blocks helper to actually |
707 | * release disk blocks.. | 702 | * release disk blocks.. |
708 | */ | 703 | */ |
709 | if (pos + len > ip->i_inode.i_size) | 704 | if (pos + len > ip->i_inode.i_size) |
710 | simple_setsize(&ip->i_inode, ip->i_inode.i_size); | 705 | truncate_setsize(&ip->i_inode, ip->i_inode.i_size); |
711 | out_endtrans: | 706 | out_endtrans: |
712 | gfs2_trans_end(sdp); | 707 | gfs2_trans_end(sdp); |
713 | out_trans_fail: | 708 | out_trans_fail: |
@@ -1047,9 +1042,9 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb, | |||
1047 | if (rv != 1) | 1042 | if (rv != 1) |
1048 | goto out; /* dio not valid, fall back to buffered i/o */ | 1043 | goto out; /* dio not valid, fall back to buffered i/o */ |
1049 | 1044 | ||
1050 | rv = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev, | 1045 | rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, |
1051 | iov, offset, nr_segs, | 1046 | offset, nr_segs, gfs2_get_block_direct, |
1052 | gfs2_get_block_direct, NULL); | 1047 | NULL, NULL, 0); |
1053 | out: | 1048 | out: |
1054 | gfs2_glock_dq_m(1, &gh); | 1049 | gfs2_glock_dq_m(1, &gh); |
1055 | gfs2_holder_uninit(&gh); | 1050 | gfs2_holder_uninit(&gh); |
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 84da64b551b2..6f482809d1a3 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1040,7 +1040,7 @@ static int trunc_start(struct gfs2_inode *ip, u64 size) | |||
1040 | goto out; | 1040 | goto out; |
1041 | 1041 | ||
1042 | if (gfs2_is_stuffed(ip)) { | 1042 | if (gfs2_is_stuffed(ip)) { |
1043 | u64 dsize = size + sizeof(struct gfs2_inode); | 1043 | u64 dsize = size + sizeof(struct gfs2_dinode); |
1044 | ip->i_disksize = size; | 1044 | ip->i_disksize = size; |
1045 | ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; | 1045 | ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; |
1046 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); | 1046 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); |
@@ -1244,13 +1244,12 @@ int gfs2_file_dealloc(struct gfs2_inode *ip) | |||
1244 | * @ip: the file being written to | 1244 | * @ip: the file being written to |
1245 | * @offset: the offset to write to | 1245 | * @offset: the offset to write to |
1246 | * @len: the number of bytes being written | 1246 | * @len: the number of bytes being written |
1247 | * @alloc_required: set to 1 if an alloc is required, 0 otherwise | ||
1248 | * | 1247 | * |
1249 | * Returns: errno | 1248 | * Returns: 1 if an alloc is required, 0 otherwise |
1250 | */ | 1249 | */ |
1251 | 1250 | ||
1252 | int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, | 1251 | int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, |
1253 | unsigned int len, int *alloc_required) | 1252 | unsigned int len) |
1254 | { | 1253 | { |
1255 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 1254 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
1256 | struct buffer_head bh; | 1255 | struct buffer_head bh; |
@@ -1258,26 +1257,23 @@ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, | |||
1258 | u64 lblock, lblock_stop, size; | 1257 | u64 lblock, lblock_stop, size; |
1259 | u64 end_of_file; | 1258 | u64 end_of_file; |
1260 | 1259 | ||
1261 | *alloc_required = 0; | ||
1262 | |||
1263 | if (!len) | 1260 | if (!len) |
1264 | return 0; | 1261 | return 0; |
1265 | 1262 | ||
1266 | if (gfs2_is_stuffed(ip)) { | 1263 | if (gfs2_is_stuffed(ip)) { |
1267 | if (offset + len > | 1264 | if (offset + len > |
1268 | sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) | 1265 | sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) |
1269 | *alloc_required = 1; | 1266 | return 1; |
1270 | return 0; | 1267 | return 0; |
1271 | } | 1268 | } |
1272 | 1269 | ||
1273 | *alloc_required = 1; | ||
1274 | shift = sdp->sd_sb.sb_bsize_shift; | 1270 | shift = sdp->sd_sb.sb_bsize_shift; |
1275 | BUG_ON(gfs2_is_dir(ip)); | 1271 | BUG_ON(gfs2_is_dir(ip)); |
1276 | end_of_file = (ip->i_disksize + sdp->sd_sb.sb_bsize - 1) >> shift; | 1272 | end_of_file = (ip->i_disksize + sdp->sd_sb.sb_bsize - 1) >> shift; |
1277 | lblock = offset >> shift; | 1273 | lblock = offset >> shift; |
1278 | lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift; | 1274 | lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift; |
1279 | if (lblock_stop > end_of_file) | 1275 | if (lblock_stop > end_of_file) |
1280 | return 0; | 1276 | return 1; |
1281 | 1277 | ||
1282 | size = (lblock_stop - lblock) << shift; | 1278 | size = (lblock_stop - lblock) << shift; |
1283 | do { | 1279 | do { |
@@ -1285,12 +1281,11 @@ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, | |||
1285 | bh.b_size = size; | 1281 | bh.b_size = size; |
1286 | gfs2_block_map(&ip->i_inode, lblock, &bh, 0); | 1282 | gfs2_block_map(&ip->i_inode, lblock, &bh, 0); |
1287 | if (!buffer_mapped(&bh)) | 1283 | if (!buffer_mapped(&bh)) |
1288 | return 0; | 1284 | return 1; |
1289 | size -= bh.b_size; | 1285 | size -= bh.b_size; |
1290 | lblock += (bh.b_size >> ip->i_inode.i_blkbits); | 1286 | lblock += (bh.b_size >> ip->i_inode.i_blkbits); |
1291 | } while(size > 0); | 1287 | } while(size > 0); |
1292 | 1288 | ||
1293 | *alloc_required = 0; | ||
1294 | return 0; | 1289 | return 0; |
1295 | } | 1290 | } |
1296 | 1291 | ||
diff --git a/fs/gfs2/bmap.h b/fs/gfs2/bmap.h
index c983177e05ac..a20a5213135a 100644
--- a/fs/gfs2/bmap.h
+++ b/fs/gfs2/bmap.h
@@ -52,6 +52,6 @@ int gfs2_truncatei(struct gfs2_inode *ip, u64 size); | |||
52 | int gfs2_truncatei_resume(struct gfs2_inode *ip); | 52 | int gfs2_truncatei_resume(struct gfs2_inode *ip); |
53 | int gfs2_file_dealloc(struct gfs2_inode *ip); | 53 | int gfs2_file_dealloc(struct gfs2_inode *ip); |
54 | int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, | 54 | int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, |
55 | unsigned int len, int *alloc_required); | 55 | unsigned int len); |
56 | 56 | ||
57 | #endif /* __BMAP_DOT_H__ */ | 57 | #endif /* __BMAP_DOT_H__ */ |
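For reference, a caller-side sketch of the gfs2_write_alloc_required() change above (illustrative only, not part of the commit): the allocation decision is now returned directly instead of through an output parameter, so the separate error check disappears at every call site.

	/* old calling convention (before this merge) */
	error = gfs2_write_alloc_required(ip, offset, len, &alloc_required);
	if (error)
		goto out_unlock;
	if (alloc_required)
		gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);

	/* new calling convention: returns 1 if an allocation is required, 0 otherwise */
	if (gfs2_write_alloc_required(ip, offset, len))
		gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);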
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 6b48d7c268b2..b9dd88a78dd4 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -955,7 +955,12 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name) | |||
955 | /* Change the pointers. | 955 | /* Change the pointers. |
956 | Don't bother distinguishing stuffed from non-stuffed. | 956 | Don't bother distinguishing stuffed from non-stuffed. |
957 | This code is complicated enough already. */ | 957 | This code is complicated enough already. */ |
958 | lp = kmalloc(half_len * sizeof(__be64), GFP_NOFS | __GFP_NOFAIL); | 958 | lp = kmalloc(half_len * sizeof(__be64), GFP_NOFS); |
959 | if (!lp) { | ||
960 | error = -ENOMEM; | ||
961 | goto fail_brelse; | ||
962 | } | ||
963 | |||
959 | /* Change the pointers */ | 964 | /* Change the pointers */ |
960 | for (x = 0; x < half_len; x++) | 965 | for (x = 0; x < half_len; x++) |
961 | lp[x] = cpu_to_be64(bn); | 966 | lp[x] = cpu_to_be64(bn); |
@@ -1063,7 +1068,9 @@ static int dir_double_exhash(struct gfs2_inode *dip) | |||
1063 | 1068 | ||
1064 | /* Allocate both the "from" and "to" buffers in one big chunk */ | 1069 | /* Allocate both the "from" and "to" buffers in one big chunk */ |
1065 | 1070 | ||
1066 | buf = kcalloc(3, sdp->sd_hash_bsize, GFP_NOFS | __GFP_NOFAIL); | 1071 | buf = kcalloc(3, sdp->sd_hash_bsize, GFP_NOFS); |
1072 | if (!buf) | ||
1073 | return -ENOMEM; | ||
1067 | 1074 | ||
1068 | for (block = dip->i_disksize >> sdp->sd_hash_bsize_shift; block--;) { | 1075 | for (block = dip->i_disksize >> sdp->sd_hash_bsize_shift; block--;) { |
1069 | error = gfs2_dir_read_data(dip, (char *)buf, | 1076 | error = gfs2_dir_read_data(dip, (char *)buf, |
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index ed9a94f0ef15..4edd662c8232 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -351,7 +351,6 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
351 | unsigned long last_index; | 351 | unsigned long last_index; |
352 | u64 pos = page->index << PAGE_CACHE_SHIFT; | 352 | u64 pos = page->index << PAGE_CACHE_SHIFT; |
353 | unsigned int data_blocks, ind_blocks, rblocks; | 353 | unsigned int data_blocks, ind_blocks, rblocks; |
354 | int alloc_required = 0; | ||
355 | struct gfs2_holder gh; | 354 | struct gfs2_holder gh; |
356 | struct gfs2_alloc *al; | 355 | struct gfs2_alloc *al; |
357 | int ret; | 356 | int ret; |
@@ -364,8 +363,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
364 | set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); | 363 | set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); |
365 | set_bit(GIF_SW_PAGED, &ip->i_flags); | 364 | set_bit(GIF_SW_PAGED, &ip->i_flags); |
366 | 365 | ||
367 | ret = gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE, &alloc_required); | 366 | if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) |
368 | if (ret || !alloc_required) | ||
369 | goto out_unlock; | 367 | goto out_unlock; |
370 | ret = -ENOMEM; | 368 | ret = -ENOMEM; |
371 | al = gfs2_alloc_get(ip); | 369 | al = gfs2_alloc_get(ip); |
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 0898f3ec8212..9adf8f924e08 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -328,6 +328,30 @@ static void gfs2_holder_wake(struct gfs2_holder *gh) | |||
328 | } | 328 | } |
329 | 329 | ||
330 | /** | 330 | /** |
331 | * do_error - Something unexpected has happened during a lock request | ||
332 | * | ||
333 | */ | ||
334 | |||
335 | static inline void do_error(struct gfs2_glock *gl, const int ret) | ||
336 | { | ||
337 | struct gfs2_holder *gh, *tmp; | ||
338 | |||
339 | list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { | ||
340 | if (test_bit(HIF_HOLDER, &gh->gh_iflags)) | ||
341 | continue; | ||
342 | if (ret & LM_OUT_ERROR) | ||
343 | gh->gh_error = -EIO; | ||
344 | else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) | ||
345 | gh->gh_error = GLR_TRYFAILED; | ||
346 | else | ||
347 | continue; | ||
348 | list_del_init(&gh->gh_list); | ||
349 | trace_gfs2_glock_queue(gh, 0); | ||
350 | gfs2_holder_wake(gh); | ||
351 | } | ||
352 | } | ||
353 | |||
354 | /** | ||
331 | * do_promote - promote as many requests as possible on the current queue | 355 | * do_promote - promote as many requests as possible on the current queue |
332 | * @gl: The glock | 356 | * @gl: The glock |
333 | * | 357 | * |
@@ -375,36 +399,13 @@ restart: | |||
375 | } | 399 | } |
376 | if (gh->gh_list.prev == &gl->gl_holders) | 400 | if (gh->gh_list.prev == &gl->gl_holders) |
377 | return 1; | 401 | return 1; |
402 | do_error(gl, 0); | ||
378 | break; | 403 | break; |
379 | } | 404 | } |
380 | return 0; | 405 | return 0; |
381 | } | 406 | } |
382 | 407 | ||
383 | /** | 408 | /** |
384 | * do_error - Something unexpected has happened during a lock request | ||
385 | * | ||
386 | */ | ||
387 | |||
388 | static inline void do_error(struct gfs2_glock *gl, const int ret) | ||
389 | { | ||
390 | struct gfs2_holder *gh, *tmp; | ||
391 | |||
392 | list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { | ||
393 | if (test_bit(HIF_HOLDER, &gh->gh_iflags)) | ||
394 | continue; | ||
395 | if (ret & LM_OUT_ERROR) | ||
396 | gh->gh_error = -EIO; | ||
397 | else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) | ||
398 | gh->gh_error = GLR_TRYFAILED; | ||
399 | else | ||
400 | continue; | ||
401 | list_del_init(&gh->gh_list); | ||
402 | trace_gfs2_glock_queue(gh, 0); | ||
403 | gfs2_holder_wake(gh); | ||
404 | } | ||
405 | } | ||
406 | |||
407 | /** | ||
408 | * find_first_waiter - find the first gh that's waiting for the glock | 409 | * find_first_waiter - find the first gh that's waiting for the glock |
409 | * @gl: the glock | 410 | * @gl: the glock |
410 | */ | 411 | */ |
@@ -706,18 +707,8 @@ static void glock_work_func(struct work_struct *work) | |||
706 | { | 707 | { |
707 | unsigned long delay = 0; | 708 | unsigned long delay = 0; |
708 | struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); | 709 | struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); |
709 | struct gfs2_holder *gh; | ||
710 | int drop_ref = 0; | 710 | int drop_ref = 0; |
711 | 711 | ||
712 | if (unlikely(test_bit(GLF_FROZEN, &gl->gl_flags))) { | ||
713 | spin_lock(&gl->gl_spin); | ||
714 | gh = find_first_waiter(gl); | ||
715 | if (gh && (gh->gh_flags & LM_FLAG_NOEXP) && | ||
716 | test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) | ||
717 | set_bit(GLF_REPLY_PENDING, &gl->gl_flags); | ||
718 | spin_unlock(&gl->gl_spin); | ||
719 | } | ||
720 | |||
721 | if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) { | 712 | if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) { |
722 | finish_xmote(gl, gl->gl_reply); | 713 | finish_xmote(gl, gl->gl_reply); |
723 | drop_ref = 1; | 714 | drop_ref = 1; |
@@ -1072,6 +1063,9 @@ int gfs2_glock_nq(struct gfs2_holder *gh) | |||
1072 | 1063 | ||
1073 | spin_lock(&gl->gl_spin); | 1064 | spin_lock(&gl->gl_spin); |
1074 | add_to_queue(gh); | 1065 | add_to_queue(gh); |
1066 | if ((LM_FLAG_NOEXP & gh->gh_flags) && | ||
1067 | test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) | ||
1068 | set_bit(GLF_REPLY_PENDING, &gl->gl_flags); | ||
1075 | run_queue(gl, 1); | 1069 | run_queue(gl, 1); |
1076 | spin_unlock(&gl->gl_spin); | 1070 | spin_unlock(&gl->gl_spin); |
1077 | 1071 | ||
@@ -1329,6 +1323,36 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state) | |||
1329 | } | 1323 | } |
1330 | 1324 | ||
1331 | /** | 1325 | /** |
1326 | * gfs2_should_freeze - Figure out if glock should be frozen | ||
1327 | * @gl: The glock in question | ||
1328 | * | ||
1329 | * Glocks are not frozen if (a) the result of the dlm operation is | ||
1330 | * an error, (b) the locking operation was an unlock operation or | ||
1331 | * (c) if there is a "noexp" flagged request anywhere in the queue | ||
1332 | * | ||
1333 | * Returns: 1 if freezing should occur, 0 otherwise | ||
1334 | */ | ||
1335 | |||
1336 | static int gfs2_should_freeze(const struct gfs2_glock *gl) | ||
1337 | { | ||
1338 | const struct gfs2_holder *gh; | ||
1339 | |||
1340 | if (gl->gl_reply & ~LM_OUT_ST_MASK) | ||
1341 | return 0; | ||
1342 | if (gl->gl_target == LM_ST_UNLOCKED) | ||
1343 | return 0; | ||
1344 | |||
1345 | list_for_each_entry(gh, &gl->gl_holders, gh_list) { | ||
1346 | if (test_bit(HIF_HOLDER, &gh->gh_iflags)) | ||
1347 | continue; | ||
1348 | if (LM_FLAG_NOEXP & gh->gh_flags) | ||
1349 | return 0; | ||
1350 | } | ||
1351 | |||
1352 | return 1; | ||
1353 | } | ||
1354 | |||
1355 | /** | ||
1332 | * gfs2_glock_complete - Callback used by locking | 1356 | * gfs2_glock_complete - Callback used by locking |
1333 | * @gl: Pointer to the glock | 1357 | * @gl: Pointer to the glock |
1334 | * @ret: The return value from the dlm | 1358 | * @ret: The return value from the dlm |
@@ -1338,18 +1362,17 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state) | |||
1338 | void gfs2_glock_complete(struct gfs2_glock *gl, int ret) | 1362 | void gfs2_glock_complete(struct gfs2_glock *gl, int ret) |
1339 | { | 1363 | { |
1340 | struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct; | 1364 | struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct; |
1365 | |||
1341 | gl->gl_reply = ret; | 1366 | gl->gl_reply = ret; |
1367 | |||
1342 | if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) { | 1368 | if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) { |
1343 | struct gfs2_holder *gh; | ||
1344 | spin_lock(&gl->gl_spin); | 1369 | spin_lock(&gl->gl_spin); |
1345 | gh = find_first_waiter(gl); | 1370 | if (gfs2_should_freeze(gl)) { |
1346 | if ((!(gh && (gh->gh_flags & LM_FLAG_NOEXP)) && | ||
1347 | (gl->gl_target != LM_ST_UNLOCKED)) || | ||
1348 | ((ret & ~LM_OUT_ST_MASK) != 0)) | ||
1349 | set_bit(GLF_FROZEN, &gl->gl_flags); | 1371 | set_bit(GLF_FROZEN, &gl->gl_flags); |
1350 | spin_unlock(&gl->gl_spin); | 1372 | spin_unlock(&gl->gl_spin); |
1351 | if (test_bit(GLF_FROZEN, &gl->gl_flags)) | ||
1352 | return; | 1373 | return; |
1374 | } | ||
1375 | spin_unlock(&gl->gl_spin); | ||
1353 | } | 1376 | } |
1354 | set_bit(GLF_REPLY_PENDING, &gl->gl_flags); | 1377 | set_bit(GLF_REPLY_PENDING, &gl->gl_flags); |
1355 | gfs2_glock_hold(gl); | 1378 | gfs2_glock_hold(gl); |
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index b5d7363b22da..fdbf4b366fa5 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -12,7 +12,6 @@ | |||
12 | 12 | ||
13 | #include <linux/fs.h> | 13 | #include <linux/fs.h> |
14 | #include <linux/workqueue.h> | 14 | #include <linux/workqueue.h> |
15 | #include <linux/slow-work.h> | ||
16 | #include <linux/dlm.h> | 15 | #include <linux/dlm.h> |
17 | #include <linux/buffer_head.h> | 16 | #include <linux/buffer_head.h> |
18 | 17 | ||
@@ -383,7 +382,7 @@ struct gfs2_journal_extent { | |||
383 | struct gfs2_jdesc { | 382 | struct gfs2_jdesc { |
384 | struct list_head jd_list; | 383 | struct list_head jd_list; |
385 | struct list_head extent_list; | 384 | struct list_head extent_list; |
386 | struct slow_work jd_work; | 385 | struct work_struct jd_work; |
387 | struct inode *jd_inode; | 386 | struct inode *jd_inode; |
388 | unsigned long jd_flags; | 387 | unsigned long jd_flags; |
389 | #define JDF_RECOVERY 1 | 388 | #define JDF_RECOVERY 1 |
@@ -460,6 +459,7 @@ enum { | |||
460 | SDF_NOBARRIERS = 3, | 459 | SDF_NOBARRIERS = 3, |
461 | SDF_NORECOVERY = 4, | 460 | SDF_NORECOVERY = 4, |
462 | SDF_DEMOTE = 5, | 461 | SDF_DEMOTE = 5, |
462 | SDF_NOJOURNALID = 6, | ||
463 | }; | 463 | }; |
464 | 464 | ||
465 | #define GFS2_FSNAME_LEN 256 | 465 | #define GFS2_FSNAME_LEN 256 |
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index f03afd9c44bc..08140f185a37 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -84,7 +84,7 @@ static int iget_skip_test(struct inode *inode, void *opaque) | |||
84 | struct gfs2_skip_data *data = opaque; | 84 | struct gfs2_skip_data *data = opaque; |
85 | 85 | ||
86 | if (ip->i_no_addr == data->no_addr) { | 86 | if (ip->i_no_addr == data->no_addr) { |
87 | if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)){ | 87 | if (inode->i_state & (I_FREEING|I_WILL_FREE)){ |
88 | data->skipped = 1; | 88 | data->skipped = 1; |
89 | return 0; | 89 | return 0; |
90 | } | 90 | } |
@@ -991,18 +991,29 @@ fail: | |||
991 | 991 | ||
992 | static int __gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr) | 992 | static int __gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr) |
993 | { | 993 | { |
994 | struct inode *inode = &ip->i_inode; | ||
994 | struct buffer_head *dibh; | 995 | struct buffer_head *dibh; |
995 | int error; | 996 | int error; |
996 | 997 | ||
997 | error = gfs2_meta_inode_buffer(ip, &dibh); | 998 | error = gfs2_meta_inode_buffer(ip, &dibh); |
998 | if (!error) { | 999 | if (error) |
999 | error = inode_setattr(&ip->i_inode, attr); | 1000 | return error; |
1000 | gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error); | 1001 | |
1001 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); | 1002 | if ((attr->ia_valid & ATTR_SIZE) && |
1002 | gfs2_dinode_out(ip, dibh->b_data); | 1003 | attr->ia_size != i_size_read(inode)) { |
1003 | brelse(dibh); | 1004 | error = vmtruncate(inode, attr->ia_size); |
1005 | if (error) | ||
1006 | return error; | ||
1004 | } | 1007 | } |
1005 | return error; | 1008 | |
1009 | setattr_copy(inode, attr); | ||
1010 | mark_inode_dirty(inode); | ||
1011 | |||
1012 | gfs2_assert_warn(GFS2_SB(inode), !error); | ||
1013 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); | ||
1014 | gfs2_dinode_out(ip, dibh->b_data); | ||
1015 | brelse(dibh); | ||
1016 | return 0; | ||
1006 | } | 1017 | } |
1007 | 1018 | ||
1008 | /** | 1019 | /** |
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 6a857e24f947..cde1248a6225 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -595,7 +595,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull) | |||
595 | if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) | 595 | if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) |
596 | goto skip_barrier; | 596 | goto skip_barrier; |
597 | get_bh(bh); | 597 | get_bh(bh); |
598 | submit_bh(WRITE_SYNC | (1 << BIO_RW_BARRIER) | (1 << BIO_RW_META), bh); | 598 | submit_bh(WRITE_BARRIER | REQ_META, bh); |
599 | wait_on_buffer(bh); | 599 | wait_on_buffer(bh); |
600 | if (buffer_eopnotsupp(bh)) { | 600 | if (buffer_eopnotsupp(bh)) { |
601 | clear_buffer_eopnotsupp(bh); | 601 | clear_buffer_eopnotsupp(bh); |
@@ -605,7 +605,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull) | |||
605 | lock_buffer(bh); | 605 | lock_buffer(bh); |
606 | skip_barrier: | 606 | skip_barrier: |
607 | get_bh(bh); | 607 | get_bh(bh); |
608 | submit_bh(WRITE_SYNC | (1 << BIO_RW_META), bh); | 608 | submit_bh(WRITE_SYNC | REQ_META, bh); |
609 | wait_on_buffer(bh); | 609 | wait_on_buffer(bh); |
610 | } | 610 | } |
611 | if (!buffer_uptodate(bh)) | 611 | if (!buffer_uptodate(bh)) |
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index fb2a5f93b7c3..b1e9630eb46a 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/gfs2_ondisk.h> | 16 | #include <linux/gfs2_ondisk.h> |
17 | #include <asm/atomic.h> | 17 | #include <asm/atomic.h> |
18 | #include <linux/slow-work.h> | ||
19 | 18 | ||
20 | #include "gfs2.h" | 19 | #include "gfs2.h" |
21 | #include "incore.h" | 20 | #include "incore.h" |
@@ -24,6 +23,7 @@ | |||
24 | #include "util.h" | 23 | #include "util.h" |
25 | #include "glock.h" | 24 | #include "glock.h" |
26 | #include "quota.h" | 25 | #include "quota.h" |
26 | #include "recovery.h" | ||
27 | 27 | ||
28 | static struct shrinker qd_shrinker = { | 28 | static struct shrinker qd_shrinker = { |
29 | .shrink = gfs2_shrink_qd_memory, | 29 | .shrink = gfs2_shrink_qd_memory, |
@@ -138,9 +138,11 @@ static int __init init_gfs2_fs(void) | |||
138 | if (error) | 138 | if (error) |
139 | goto fail_unregister; | 139 | goto fail_unregister; |
140 | 140 | ||
141 | error = slow_work_register_user(THIS_MODULE); | 141 | error = -ENOMEM; |
142 | if (error) | 142 | gfs_recovery_wq = alloc_workqueue("gfs_recovery", |
143 | goto fail_slow; | 143 | WQ_NON_REENTRANT | WQ_RESCUER, 0); |
144 | if (!gfs_recovery_wq) | ||
145 | goto fail_wq; | ||
144 | 146 | ||
145 | gfs2_register_debugfs(); | 147 | gfs2_register_debugfs(); |
146 | 148 | ||
@@ -148,7 +150,7 @@ static int __init init_gfs2_fs(void) | |||
148 | 150 | ||
149 | return 0; | 151 | return 0; |
150 | 152 | ||
151 | fail_slow: | 153 | fail_wq: |
152 | unregister_filesystem(&gfs2meta_fs_type); | 154 | unregister_filesystem(&gfs2meta_fs_type); |
153 | fail_unregister: | 155 | fail_unregister: |
154 | unregister_filesystem(&gfs2_fs_type); | 156 | unregister_filesystem(&gfs2_fs_type); |
@@ -190,7 +192,7 @@ static void __exit exit_gfs2_fs(void) | |||
190 | gfs2_unregister_debugfs(); | 192 | gfs2_unregister_debugfs(); |
191 | unregister_filesystem(&gfs2_fs_type); | 193 | unregister_filesystem(&gfs2_fs_type); |
192 | unregister_filesystem(&gfs2meta_fs_type); | 194 | unregister_filesystem(&gfs2meta_fs_type); |
193 | slow_work_unregister_user(THIS_MODULE); | 195 | destroy_workqueue(gfs_recovery_wq); |
194 | 196 | ||
195 | kmem_cache_destroy(gfs2_quotad_cachep); | 197 | kmem_cache_destroy(gfs2_quotad_cachep); |
196 | kmem_cache_destroy(gfs2_rgrpd_cachep); | 198 | kmem_cache_destroy(gfs2_rgrpd_cachep); |
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 18176d0b75d7..f3b071f921aa 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -36,8 +36,8 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb | |||
36 | { | 36 | { |
37 | struct buffer_head *bh, *head; | 37 | struct buffer_head *bh, *head; |
38 | int nr_underway = 0; | 38 | int nr_underway = 0; |
39 | int write_op = (1 << BIO_RW_META) | ((wbc->sync_mode == WB_SYNC_ALL ? | 39 | int write_op = REQ_META | |
40 | WRITE_SYNC_PLUG : WRITE)); | 40 | (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC_PLUG : WRITE); |
41 | 41 | ||
42 | BUG_ON(!PageLocked(page)); | 42 | BUG_ON(!PageLocked(page)); |
43 | BUG_ON(!page_has_buffers(page)); | 43 | BUG_ON(!page_has_buffers(page)); |
@@ -225,7 +225,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags, | |||
225 | } | 225 | } |
226 | bh->b_end_io = end_buffer_read_sync; | 226 | bh->b_end_io = end_buffer_read_sync; |
227 | get_bh(bh); | 227 | get_bh(bh); |
228 | submit_bh(READ_SYNC | (1 << BIO_RW_META), bh); | 228 | submit_bh(READ_SYNC | REQ_META, bh); |
229 | if (!(flags & DIO_WAIT)) | 229 | if (!(flags & DIO_WAIT)) |
230 | return 0; | 230 | return 0; |
231 | 231 | ||
@@ -432,7 +432,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen) | |||
432 | if (buffer_uptodate(first_bh)) | 432 | if (buffer_uptodate(first_bh)) |
433 | goto out; | 433 | goto out; |
434 | if (!buffer_locked(first_bh)) | 434 | if (!buffer_locked(first_bh)) |
435 | ll_rw_block(READ_SYNC | (1 << BIO_RW_META), 1, &first_bh); | 435 | ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh); |
436 | 436 | ||
437 | dblock++; | 437 | dblock++; |
438 | extlen--; | 438 | extlen--; |
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 3593b3a7290e..4d4b1e8ac64c 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/namei.h> | 17 | #include <linux/namei.h> |
18 | #include <linux/mount.h> | 18 | #include <linux/mount.h> |
19 | #include <linux/gfs2_ondisk.h> | 19 | #include <linux/gfs2_ondisk.h> |
20 | #include <linux/slow-work.h> | ||
21 | #include <linux/quotaops.h> | 20 | #include <linux/quotaops.h> |
22 | 21 | ||
23 | #include "gfs2.h" | 22 | #include "gfs2.h" |
@@ -76,7 +75,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb) | |||
76 | 75 | ||
77 | sb->s_fs_info = sdp; | 76 | sb->s_fs_info = sdp; |
78 | sdp->sd_vfs = sb; | 77 | sdp->sd_vfs = sb; |
79 | 78 | set_bit(SDF_NOJOURNALID, &sdp->sd_flags); | |
80 | gfs2_tune_init(&sdp->sd_tune); | 79 | gfs2_tune_init(&sdp->sd_tune); |
81 | 80 | ||
82 | init_waitqueue_head(&sdp->sd_glock_wait); | 81 | init_waitqueue_head(&sdp->sd_glock_wait); |
@@ -275,7 +274,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector) | |||
275 | 274 | ||
276 | bio->bi_end_io = end_bio_io_page; | 275 | bio->bi_end_io = end_bio_io_page; |
277 | bio->bi_private = page; | 276 | bio->bi_private = page; |
278 | submit_bio(READ_SYNC | (1 << BIO_RW_META), bio); | 277 | submit_bio(READ_SYNC | REQ_META, bio); |
279 | wait_on_page_locked(page); | 278 | wait_on_page_locked(page); |
280 | bio_put(bio); | 279 | bio_put(bio); |
281 | if (!PageUptodate(page)) { | 280 | if (!PageUptodate(page)) { |
@@ -673,7 +672,7 @@ static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh) | |||
673 | break; | 672 | break; |
674 | 673 | ||
675 | INIT_LIST_HEAD(&jd->extent_list); | 674 | INIT_LIST_HEAD(&jd->extent_list); |
676 | slow_work_init(&jd->jd_work, &gfs2_recover_ops); | 675 | INIT_WORK(&jd->jd_work, gfs2_recover_func); |
677 | jd->jd_inode = gfs2_lookupi(sdp->sd_jindex, &name, 1); | 676 | jd->jd_inode = gfs2_lookupi(sdp->sd_jindex, &name, 1); |
678 | if (!jd->jd_inode || IS_ERR(jd->jd_inode)) { | 677 | if (!jd->jd_inode || IS_ERR(jd->jd_inode)) { |
679 | if (!jd->jd_inode) | 678 | if (!jd->jd_inode) |
@@ -782,7 +781,8 @@ static int init_journal(struct gfs2_sbd *sdp, int undo) | |||
782 | if (sdp->sd_lockstruct.ls_first) { | 781 | if (sdp->sd_lockstruct.ls_first) { |
783 | unsigned int x; | 782 | unsigned int x; |
784 | for (x = 0; x < sdp->sd_journals; x++) { | 783 | for (x = 0; x < sdp->sd_journals; x++) { |
785 | error = gfs2_recover_journal(gfs2_jdesc_find(sdp, x)); | 784 | error = gfs2_recover_journal(gfs2_jdesc_find(sdp, x), |
785 | true); | ||
786 | if (error) { | 786 | if (error) { |
787 | fs_err(sdp, "error recovering journal %u: %d\n", | 787 | fs_err(sdp, "error recovering journal %u: %d\n", |
788 | x, error); | 788 | x, error); |
@@ -792,7 +792,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo) | |||
792 | 792 | ||
793 | gfs2_others_may_mount(sdp); | 793 | gfs2_others_may_mount(sdp); |
794 | } else if (!sdp->sd_args.ar_spectator) { | 794 | } else if (!sdp->sd_args.ar_spectator) { |
795 | error = gfs2_recover_journal(sdp->sd_jdesc); | 795 | error = gfs2_recover_journal(sdp->sd_jdesc, true); |
796 | if (error) { | 796 | if (error) { |
797 | fs_err(sdp, "error recovering my journal: %d\n", error); | 797 | fs_err(sdp, "error recovering my journal: %d\n", error); |
798 | goto fail_jinode_gh; | 798 | goto fail_jinode_gh; |
@@ -1050,7 +1050,8 @@ static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent) | |||
1050 | ret = match_int(&tmp[0], &option); | 1050 | ret = match_int(&tmp[0], &option); |
1051 | if (ret || option < 0) | 1051 | if (ret || option < 0) |
1052 | goto hostdata_error; | 1052 | goto hostdata_error; |
1053 | ls->ls_jid = option; | 1053 | if (test_and_clear_bit(SDF_NOJOURNALID, &sdp->sd_flags)) |
1054 | ls->ls_jid = option; | ||
1054 | break; | 1055 | break; |
1055 | case Opt_id: | 1056 | case Opt_id: |
1056 | /* Obsolete, but left for backward compat purposes */ | 1057 | /* Obsolete, but left for backward compat purposes */ |
@@ -1102,6 +1103,24 @@ void gfs2_lm_unmount(struct gfs2_sbd *sdp) | |||
1102 | lm->lm_unmount(sdp); | 1103 | lm->lm_unmount(sdp); |
1103 | } | 1104 | } |
1104 | 1105 | ||
1106 | static int gfs2_journalid_wait(void *word) | ||
1107 | { | ||
1108 | if (signal_pending(current)) | ||
1109 | return -EINTR; | ||
1110 | schedule(); | ||
1111 | return 0; | ||
1112 | } | ||
1113 | |||
1114 | static int wait_on_journal(struct gfs2_sbd *sdp) | ||
1115 | { | ||
1116 | if (sdp->sd_args.ar_spectator) | ||
1117 | return 0; | ||
1118 | if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) | ||
1119 | return 0; | ||
1120 | |||
1121 | return wait_on_bit(&sdp->sd_flags, SDF_NOJOURNALID, gfs2_journalid_wait, TASK_INTERRUPTIBLE); | ||
1122 | } | ||
1123 | |||
1105 | void gfs2_online_uevent(struct gfs2_sbd *sdp) | 1124 | void gfs2_online_uevent(struct gfs2_sbd *sdp) |
1106 | { | 1125 | { |
1107 | struct super_block *sb = sdp->sd_vfs; | 1126 | struct super_block *sb = sdp->sd_vfs; |
@@ -1194,6 +1213,10 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent | |||
1194 | if (error) | 1213 | if (error) |
1195 | goto fail_locking; | 1214 | goto fail_locking; |
1196 | 1215 | ||
1216 | error = wait_on_journal(sdp); | ||
1217 | if (error) | ||
1218 | goto fail_sb; | ||
1219 | |||
1197 | error = init_inodes(sdp, DO); | 1220 | error = init_inodes(sdp, DO); |
1198 | if (error) | 1221 | if (error) |
1199 | goto fail_sb; | 1222 | goto fail_sb; |
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index 98cdd05f3316..1009be2c9737 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -1072,7 +1072,7 @@ int gfs2_permission(struct inode *inode, int mask) | |||
1072 | } | 1072 | } |
1073 | 1073 | ||
1074 | /* | 1074 | /* |
1075 | * XXX: should be changed to have proper ordering by opencoding simple_setsize | 1075 | * XXX(truncate): the truncate_setsize calls should be moved to the end. |
1076 | */ | 1076 | */ |
1077 | static int setattr_size(struct inode *inode, struct iattr *attr) | 1077 | static int setattr_size(struct inode *inode, struct iattr *attr) |
1078 | { | 1078 | { |
@@ -1084,10 +1084,8 @@ static int setattr_size(struct inode *inode, struct iattr *attr) | |||
1084 | error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks); | 1084 | error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks); |
1085 | if (error) | 1085 | if (error) |
1086 | return error; | 1086 | return error; |
1087 | error = simple_setsize(inode, attr->ia_size); | 1087 | truncate_setsize(inode, attr->ia_size); |
1088 | gfs2_trans_end(sdp); | 1088 | gfs2_trans_end(sdp); |
1089 | if (error) | ||
1090 | return error; | ||
1091 | } | 1089 | } |
1092 | 1090 | ||
1093 | error = gfs2_truncatei(ip, attr->ia_size); | 1091 | error = gfs2_truncatei(ip, attr->ia_size); |
@@ -1136,8 +1134,16 @@ static int setattr_chown(struct inode *inode, struct iattr *attr) | |||
1136 | if (error) | 1134 | if (error) |
1137 | goto out_end_trans; | 1135 | goto out_end_trans; |
1138 | 1136 | ||
1139 | error = inode_setattr(inode, attr); | 1137 | if ((attr->ia_valid & ATTR_SIZE) && |
1140 | gfs2_assert_warn(sdp, !error); | 1138 | attr->ia_size != i_size_read(inode)) { |
1139 | int error; | ||
1140 | |||
1141 | error = vmtruncate(inode, attr->ia_size); | ||
1142 | gfs2_assert_warn(sdp, !error); | ||
1143 | } | ||
1144 | |||
1145 | setattr_copy(inode, attr); | ||
1146 | mark_inode_dirty(inode); | ||
1141 | 1147 | ||
1142 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); | 1148 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); |
1143 | gfs2_dinode_out(ip, dibh->b_data); | 1149 | gfs2_dinode_out(ip, dibh->b_data); |
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 8f02d3db8f42..1bc6b5695e6d 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -787,15 +787,9 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda) | |||
787 | goto out; | 787 | goto out; |
788 | 788 | ||
789 | for (x = 0; x < num_qd; x++) { | 789 | for (x = 0; x < num_qd; x++) { |
790 | int alloc_required; | ||
791 | |||
792 | offset = qd2offset(qda[x]); | 790 | offset = qd2offset(qda[x]); |
793 | error = gfs2_write_alloc_required(ip, offset, | 791 | if (gfs2_write_alloc_required(ip, offset, |
794 | sizeof(struct gfs2_quota), | 792 | sizeof(struct gfs2_quota))) |
795 | &alloc_required); | ||
796 | if (error) | ||
797 | goto out_gunlock; | ||
798 | if (alloc_required) | ||
799 | nalloc++; | 793 | nalloc++; |
800 | } | 794 | } |
801 | 795 | ||
@@ -1455,10 +1449,10 @@ static int gfs2_quota_get_xstate(struct super_block *sb, | |||
1455 | 1449 | ||
1456 | switch (sdp->sd_args.ar_quota) { | 1450 | switch (sdp->sd_args.ar_quota) { |
1457 | case GFS2_QUOTA_ON: | 1451 | case GFS2_QUOTA_ON: |
1458 | fqs->qs_flags |= (XFS_QUOTA_UDQ_ENFD | XFS_QUOTA_GDQ_ENFD); | 1452 | fqs->qs_flags |= (FS_QUOTA_UDQ_ENFD | FS_QUOTA_GDQ_ENFD); |
1459 | /*FALLTHRU*/ | 1453 | /*FALLTHRU*/ |
1460 | case GFS2_QUOTA_ACCOUNT: | 1454 | case GFS2_QUOTA_ACCOUNT: |
1461 | fqs->qs_flags |= (XFS_QUOTA_UDQ_ACCT | XFS_QUOTA_GDQ_ACCT); | 1455 | fqs->qs_flags |= (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT); |
1462 | break; | 1456 | break; |
1463 | case GFS2_QUOTA_OFF: | 1457 | case GFS2_QUOTA_OFF: |
1464 | break; | 1458 | break; |
@@ -1504,7 +1498,7 @@ static int gfs2_get_dqblk(struct super_block *sb, int type, qid_t id, | |||
1504 | 1498 | ||
1505 | qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb; | 1499 | qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb; |
1506 | fdq->d_version = FS_DQUOT_VERSION; | 1500 | fdq->d_version = FS_DQUOT_VERSION; |
1507 | fdq->d_flags = (type == QUOTA_USER) ? XFS_USER_QUOTA : XFS_GROUP_QUOTA; | 1501 | fdq->d_flags = (type == QUOTA_USER) ? FS_USER_QUOTA : FS_GROUP_QUOTA; |
1508 | fdq->d_id = id; | 1502 | fdq->d_id = id; |
1509 | fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit); | 1503 | fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit); |
1510 | fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn); | 1504 | fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn); |
@@ -1539,12 +1533,12 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id, | |||
1539 | switch(type) { | 1533 | switch(type) { |
1540 | case USRQUOTA: | 1534 | case USRQUOTA: |
1541 | type = QUOTA_USER; | 1535 | type = QUOTA_USER; |
1542 | if (fdq->d_flags != XFS_USER_QUOTA) | 1536 | if (fdq->d_flags != FS_USER_QUOTA) |
1543 | return -EINVAL; | 1537 | return -EINVAL; |
1544 | break; | 1538 | break; |
1545 | case GRPQUOTA: | 1539 | case GRPQUOTA: |
1546 | type = QUOTA_GROUP; | 1540 | type = QUOTA_GROUP; |
1547 | if (fdq->d_flags != XFS_GROUP_QUOTA) | 1541 | if (fdq->d_flags != FS_GROUP_QUOTA) |
1548 | return -EINVAL; | 1542 | return -EINVAL; |
1549 | break; | 1543 | break; |
1550 | default: | 1544 | default: |
@@ -1584,10 +1578,7 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id, | |||
1584 | goto out_i; | 1578 | goto out_i; |
1585 | 1579 | ||
1586 | offset = qd2offset(qd); | 1580 | offset = qd2offset(qd); |
1587 | error = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota), | 1581 | alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota)); |
1588 | &alloc_required); | ||
1589 | if (error) | ||
1590 | goto out_i; | ||
1591 | if (alloc_required) { | 1582 | if (alloc_required) { |
1592 | al = gfs2_alloc_get(ip); | 1583 | al = gfs2_alloc_get(ip); |
1593 | if (al == NULL) | 1584 | if (al == NULL) |
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 4b9bece3d437..f7f89a94a5a4 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -14,7 +14,6 @@ | |||
14 | #include <linux/buffer_head.h> | 14 | #include <linux/buffer_head.h> |
15 | #include <linux/gfs2_ondisk.h> | 15 | #include <linux/gfs2_ondisk.h> |
16 | #include <linux/crc32.h> | 16 | #include <linux/crc32.h> |
17 | #include <linux/slow-work.h> | ||
18 | 17 | ||
19 | #include "gfs2.h" | 18 | #include "gfs2.h" |
20 | #include "incore.h" | 19 | #include "incore.h" |
@@ -28,6 +27,8 @@ | |||
28 | #include "util.h" | 27 | #include "util.h" |
29 | #include "dir.h" | 28 | #include "dir.h" |
30 | 29 | ||
30 | struct workqueue_struct *gfs_recovery_wq; | ||
31 | |||
31 | int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk, | 32 | int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk, |
32 | struct buffer_head **bh) | 33 | struct buffer_head **bh) |
33 | { | 34 | { |
@@ -443,23 +444,7 @@ static void gfs2_recovery_done(struct gfs2_sbd *sdp, unsigned int jid, | |||
443 | kobject_uevent_env(&sdp->sd_kobj, KOBJ_CHANGE, envp); | 444 | kobject_uevent_env(&sdp->sd_kobj, KOBJ_CHANGE, envp); |
444 | } | 445 | } |
445 | 446 | ||
446 | static int gfs2_recover_get_ref(struct slow_work *work) | 447 | void gfs2_recover_func(struct work_struct *work) |
447 | { | ||
448 | struct gfs2_jdesc *jd = container_of(work, struct gfs2_jdesc, jd_work); | ||
449 | if (test_and_set_bit(JDF_RECOVERY, &jd->jd_flags)) | ||
450 | return -EBUSY; | ||
451 | return 0; | ||
452 | } | ||
453 | |||
454 | static void gfs2_recover_put_ref(struct slow_work *work) | ||
455 | { | ||
456 | struct gfs2_jdesc *jd = container_of(work, struct gfs2_jdesc, jd_work); | ||
457 | clear_bit(JDF_RECOVERY, &jd->jd_flags); | ||
458 | smp_mb__after_clear_bit(); | ||
459 | wake_up_bit(&jd->jd_flags, JDF_RECOVERY); | ||
460 | } | ||
461 | |||
462 | static void gfs2_recover_work(struct slow_work *work) | ||
463 | { | 448 | { |
464 | struct gfs2_jdesc *jd = container_of(work, struct gfs2_jdesc, jd_work); | 449 | struct gfs2_jdesc *jd = container_of(work, struct gfs2_jdesc, jd_work); |
465 | struct gfs2_inode *ip = GFS2_I(jd->jd_inode); | 450 | struct gfs2_inode *ip = GFS2_I(jd->jd_inode); |
@@ -578,7 +563,7 @@ static void gfs2_recover_work(struct slow_work *work) | |||
578 | gfs2_glock_dq_uninit(&j_gh); | 563 | gfs2_glock_dq_uninit(&j_gh); |
579 | 564 | ||
580 | fs_info(sdp, "jid=%u: Done\n", jd->jd_jid); | 565 | fs_info(sdp, "jid=%u: Done\n", jd->jd_jid); |
581 | return; | 566 | goto done; |
582 | 567 | ||
583 | fail_gunlock_tr: | 568 | fail_gunlock_tr: |
584 | gfs2_glock_dq_uninit(&t_gh); | 569 | gfs2_glock_dq_uninit(&t_gh); |
@@ -590,32 +575,35 @@ fail_gunlock_j: | |||
590 | } | 575 | } |
591 | 576 | ||
592 | fs_info(sdp, "jid=%u: %s\n", jd->jd_jid, (error) ? "Failed" : "Done"); | 577 | fs_info(sdp, "jid=%u: %s\n", jd->jd_jid, (error) ? "Failed" : "Done"); |
593 | |||
594 | fail: | 578 | fail: |
595 | gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_GAVEUP); | 579 | gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_GAVEUP); |
580 | done: | ||
581 | clear_bit(JDF_RECOVERY, &jd->jd_flags); | ||
582 | smp_mb__after_clear_bit(); | ||
583 | wake_up_bit(&jd->jd_flags, JDF_RECOVERY); | ||
596 | } | 584 | } |
597 | 585 | ||
598 | struct slow_work_ops gfs2_recover_ops = { | ||
599 | .owner = THIS_MODULE, | ||
600 | .get_ref = gfs2_recover_get_ref, | ||
601 | .put_ref = gfs2_recover_put_ref, | ||
602 | .execute = gfs2_recover_work, | ||
603 | }; | ||
604 | |||
605 | |||
606 | static int gfs2_recovery_wait(void *word) | 586 | static int gfs2_recovery_wait(void *word) |
607 | { | 587 | { |
608 | schedule(); | 588 | schedule(); |
609 | return 0; | 589 | return 0; |
610 | } | 590 | } |
611 | 591 | ||
612 | int gfs2_recover_journal(struct gfs2_jdesc *jd) | 592 | int gfs2_recover_journal(struct gfs2_jdesc *jd, bool wait) |
613 | { | 593 | { |
614 | int rv; | 594 | int rv; |
615 | rv = slow_work_enqueue(&jd->jd_work); | 595 | |
616 | if (rv) | 596 | if (test_and_set_bit(JDF_RECOVERY, &jd->jd_flags)) |
617 | return rv; | 597 | return -EBUSY; |
618 | wait_on_bit(&jd->jd_flags, JDF_RECOVERY, gfs2_recovery_wait, TASK_UNINTERRUPTIBLE); | 598 | |
599 | /* we have JDF_RECOVERY, queue should always succeed */ | ||
600 | rv = queue_work(gfs_recovery_wq, &jd->jd_work); | ||
601 | BUG_ON(!rv); | ||
602 | |||
603 | if (wait) | ||
604 | wait_on_bit(&jd->jd_flags, JDF_RECOVERY, gfs2_recovery_wait, | ||
605 | TASK_UNINTERRUPTIBLE); | ||
606 | |||
619 | return 0; | 607 | return 0; |
620 | } | 608 | } |
621 | 609 | ||
diff --git a/fs/gfs2/recovery.h b/fs/gfs2/recovery.h
index 1616ac22569a..2226136c7647 100644
--- a/fs/gfs2/recovery.h
+++ b/fs/gfs2/recovery.h
@@ -12,6 +12,8 @@ | |||
12 | 12 | ||
13 | #include "incore.h" | 13 | #include "incore.h" |
14 | 14 | ||
15 | extern struct workqueue_struct *gfs_recovery_wq; | ||
16 | |||
15 | static inline void gfs2_replay_incr_blk(struct gfs2_sbd *sdp, unsigned int *blk) | 17 | static inline void gfs2_replay_incr_blk(struct gfs2_sbd *sdp, unsigned int *blk) |
16 | { | 18 | { |
17 | if (++*blk == sdp->sd_jdesc->jd_blocks) | 19 | if (++*blk == sdp->sd_jdesc->jd_blocks) |
@@ -27,8 +29,8 @@ extern void gfs2_revoke_clean(struct gfs2_sbd *sdp); | |||
27 | 29 | ||
28 | extern int gfs2_find_jhead(struct gfs2_jdesc *jd, | 30 | extern int gfs2_find_jhead(struct gfs2_jdesc *jd, |
29 | struct gfs2_log_header_host *head); | 31 | struct gfs2_log_header_host *head); |
30 | extern int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd); | 32 | extern int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd, bool wait); |
31 | extern struct slow_work_ops gfs2_recover_ops; | 33 | extern void gfs2_recover_func(struct work_struct *work); |
32 | 34 | ||
33 | #endif /* __RECOVERY_DOT_H__ */ | 35 | #endif /* __RECOVERY_DOT_H__ */ |
34 | 36 | ||
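The recovery.c, recovery.h and main.c hunks above replace the slow_work machinery with a dedicated workqueue; condensed from the code in this diff (a sketch, not an additional change), the resulting flow is:

	/* module init (main.c): create the recovery workqueue */
	gfs_recovery_wq = alloc_workqueue("gfs_recovery",
					  WQ_NON_REENTRANT | WQ_RESCUER, 0);

	/* per-journal setup (ops_fstype.c): bind the work item to its handler */
	INIT_WORK(&jd->jd_work, gfs2_recover_func);

	/* gfs2_recover_journal(jd, wait): JDF_RECOVERY serialises recovery runs */
	if (test_and_set_bit(JDF_RECOVERY, &jd->jd_flags))
		return -EBUSY;
	queue_work(gfs_recovery_wq, &jd->jd_work);
	if (wait)
		wait_on_bit(&jd->jd_flags, JDF_RECOVERY, gfs2_recovery_wait,
			    TASK_UNINTERRUPTIBLE);

	/* gfs2_recover_func() clears JDF_RECOVERY and wakes waiters when done;
	   module exit (main.c) tears the queue down with destroy_workqueue(). */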
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 4d1aad38f1b1..77cb9f830ee4 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -342,8 +342,6 @@ int gfs2_jdesc_check(struct gfs2_jdesc *jd) | |||
342 | { | 342 | { |
343 | struct gfs2_inode *ip = GFS2_I(jd->jd_inode); | 343 | struct gfs2_inode *ip = GFS2_I(jd->jd_inode); |
344 | struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); | 344 | struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); |
345 | int ar; | ||
346 | int error; | ||
347 | 345 | ||
348 | if (ip->i_disksize < (8 << 20) || ip->i_disksize > (1 << 30) || | 346 | if (ip->i_disksize < (8 << 20) || ip->i_disksize > (1 << 30) || |
349 | (ip->i_disksize & (sdp->sd_sb.sb_bsize - 1))) { | 347 | (ip->i_disksize & (sdp->sd_sb.sb_bsize - 1))) { |
@@ -352,13 +350,12 @@ int gfs2_jdesc_check(struct gfs2_jdesc *jd) | |||
352 | } | 350 | } |
353 | jd->jd_blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift; | 351 | jd->jd_blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift; |
354 | 352 | ||
355 | error = gfs2_write_alloc_required(ip, 0, ip->i_disksize, &ar); | 353 | if (gfs2_write_alloc_required(ip, 0, ip->i_disksize)) { |
356 | if (!error && ar) { | ||
357 | gfs2_consist_inode(ip); | 354 | gfs2_consist_inode(ip); |
358 | error = -EIO; | 355 | return -EIO; |
359 | } | 356 | } |
360 | 357 | ||
361 | return error; | 358 | return 0; |
362 | } | 359 | } |
363 | 360 | ||
364 | /** | 361 | /** |
@@ -1191,7 +1188,7 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data) | |||
1191 | * node for later deallocation. | 1188 | * node for later deallocation. |
1192 | */ | 1189 | */ |
1193 | 1190 | ||
1194 | static void gfs2_drop_inode(struct inode *inode) | 1191 | static int gfs2_drop_inode(struct inode *inode) |
1195 | { | 1192 | { |
1196 | struct gfs2_inode *ip = GFS2_I(inode); | 1193 | struct gfs2_inode *ip = GFS2_I(inode); |
1197 | 1194 | ||
@@ -1200,26 +1197,7 @@ static void gfs2_drop_inode(struct inode *inode) | |||
1200 | if (gl && test_bit(GLF_DEMOTE, &gl->gl_flags)) | 1197 | if (gl && test_bit(GLF_DEMOTE, &gl->gl_flags)) |
1201 | clear_nlink(inode); | 1198 | clear_nlink(inode); |
1202 | } | 1199 | } |
1203 | generic_drop_inode(inode); | 1200 | return generic_drop_inode(inode); |
1204 | } | ||
1205 | |||
1206 | /** | ||
1207 | * gfs2_clear_inode - Deallocate an inode when VFS is done with it | ||
1208 | * @inode: The VFS inode | ||
1209 | * | ||
1210 | */ | ||
1211 | |||
1212 | static void gfs2_clear_inode(struct inode *inode) | ||
1213 | { | ||
1214 | struct gfs2_inode *ip = GFS2_I(inode); | ||
1215 | |||
1216 | ip->i_gl->gl_object = NULL; | ||
1217 | gfs2_glock_put(ip->i_gl); | ||
1218 | ip->i_gl = NULL; | ||
1219 | if (ip->i_iopen_gh.gh_gl) { | ||
1220 | ip->i_iopen_gh.gh_gl->gl_object = NULL; | ||
1221 | gfs2_glock_dq_uninit(&ip->i_iopen_gh); | ||
1222 | } | ||
1223 | } | 1201 | } |
1224 | 1202 | ||
1225 | static int is_ancestor(const struct dentry *d1, const struct dentry *d2) | 1203 | static int is_ancestor(const struct dentry *d1, const struct dentry *d2) |
@@ -1347,13 +1325,16 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt) | |||
1347 | * is safe, just less efficient. | 1325 | * is safe, just less efficient. |
1348 | */ | 1326 | */ |
1349 | 1327 | ||
1350 | static void gfs2_delete_inode(struct inode *inode) | 1328 | static void gfs2_evict_inode(struct inode *inode) |
1351 | { | 1329 | { |
1352 | struct gfs2_sbd *sdp = inode->i_sb->s_fs_info; | 1330 | struct gfs2_sbd *sdp = inode->i_sb->s_fs_info; |
1353 | struct gfs2_inode *ip = GFS2_I(inode); | 1331 | struct gfs2_inode *ip = GFS2_I(inode); |
1354 | struct gfs2_holder gh; | 1332 | struct gfs2_holder gh; |
1355 | int error; | 1333 | int error; |
1356 | 1334 | ||
1335 | if (inode->i_nlink) | ||
1336 | goto out; | ||
1337 | |||
1357 | error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); | 1338 | error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); |
1358 | if (unlikely(error)) { | 1339 | if (unlikely(error)) { |
1359 | gfs2_glock_dq_uninit(&ip->i_iopen_gh); | 1340 | gfs2_glock_dq_uninit(&ip->i_iopen_gh); |
@@ -1407,10 +1388,18 @@ out_unlock: | |||
1407 | gfs2_holder_uninit(&ip->i_iopen_gh); | 1388 | gfs2_holder_uninit(&ip->i_iopen_gh); |
1408 | gfs2_glock_dq_uninit(&gh); | 1389 | gfs2_glock_dq_uninit(&gh); |
1409 | if (error && error != GLR_TRYFAILED && error != -EROFS) | 1390 | if (error && error != GLR_TRYFAILED && error != -EROFS) |
1410 | fs_warn(sdp, "gfs2_delete_inode: %d\n", error); | 1391 | fs_warn(sdp, "gfs2_evict_inode: %d\n", error); |
1411 | out: | 1392 | out: |
1412 | truncate_inode_pages(&inode->i_data, 0); | 1393 | truncate_inode_pages(&inode->i_data, 0); |
1413 | clear_inode(inode); | 1394 | end_writeback(inode); |
1395 | |||
1396 | ip->i_gl->gl_object = NULL; | ||
1397 | gfs2_glock_put(ip->i_gl); | ||
1398 | ip->i_gl = NULL; | ||
1399 | if (ip->i_iopen_gh.gh_gl) { | ||
1400 | ip->i_iopen_gh.gh_gl->gl_object = NULL; | ||
1401 | gfs2_glock_dq_uninit(&ip->i_iopen_gh); | ||
1402 | } | ||
1414 | } | 1403 | } |
1415 | 1404 | ||
1416 | static struct inode *gfs2_alloc_inode(struct super_block *sb) | 1405 | static struct inode *gfs2_alloc_inode(struct super_block *sb) |
@@ -1434,14 +1423,13 @@ const struct super_operations gfs2_super_ops = { | |||
1434 | .alloc_inode = gfs2_alloc_inode, | 1423 | .alloc_inode = gfs2_alloc_inode, |
1435 | .destroy_inode = gfs2_destroy_inode, | 1424 | .destroy_inode = gfs2_destroy_inode, |
1436 | .write_inode = gfs2_write_inode, | 1425 | .write_inode = gfs2_write_inode, |
1437 | .delete_inode = gfs2_delete_inode, | 1426 | .evict_inode = gfs2_evict_inode, |
1438 | .put_super = gfs2_put_super, | 1427 | .put_super = gfs2_put_super, |
1439 | .sync_fs = gfs2_sync_fs, | 1428 | .sync_fs = gfs2_sync_fs, |
1440 | .freeze_fs = gfs2_freeze, | 1429 | .freeze_fs = gfs2_freeze, |
1441 | .unfreeze_fs = gfs2_unfreeze, | 1430 | .unfreeze_fs = gfs2_unfreeze, |
1442 | .statfs = gfs2_statfs, | 1431 | .statfs = gfs2_statfs, |
1443 | .remount_fs = gfs2_remount_fs, | 1432 | .remount_fs = gfs2_remount_fs, |
1444 | .clear_inode = gfs2_clear_inode, | ||
1445 | .drop_inode = gfs2_drop_inode, | 1433 | .drop_inode = gfs2_drop_inode, |
1446 | .show_options = gfs2_show_options, | 1434 | .show_options = gfs2_show_options, |
1447 | }; | 1435 | }; |
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 37f5393e68e6..ccacffd2faaa 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -25,6 +25,7 @@ | |||
25 | #include "quota.h" | 25 | #include "quota.h" |
26 | #include "util.h" | 26 | #include "util.h" |
27 | #include "glops.h" | 27 | #include "glops.h" |
28 | #include "recovery.h" | ||
28 | 29 | ||
29 | struct gfs2_attr { | 30 | struct gfs2_attr { |
30 | struct attribute attr; | 31 | struct attribute attr; |
@@ -325,6 +326,30 @@ static ssize_t lkfirst_show(struct gfs2_sbd *sdp, char *buf) | |||
325 | return sprintf(buf, "%d\n", ls->ls_first); | 326 | return sprintf(buf, "%d\n", ls->ls_first); |
326 | } | 327 | } |
327 | 328 | ||
329 | static ssize_t lkfirst_store(struct gfs2_sbd *sdp, const char *buf, size_t len) | ||
330 | { | ||
331 | unsigned first; | ||
332 | int rv; | ||
333 | |||
334 | rv = sscanf(buf, "%u", &first); | ||
335 | if (rv != 1 || first > 1) | ||
336 | return -EINVAL; | ||
337 | spin_lock(&sdp->sd_jindex_spin); | ||
338 | rv = -EBUSY; | ||
339 | if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0) | ||
340 | goto out; | ||
341 | rv = -EINVAL; | ||
342 | if (sdp->sd_args.ar_spectator) | ||
343 | goto out; | ||
344 | if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) | ||
345 | goto out; | ||
346 | sdp->sd_lockstruct.ls_first = first; | ||
347 | rv = 0; | ||
348 | out: | ||
349 | spin_unlock(&sdp->sd_jindex_spin); | ||
350 | return rv ? rv : len; | ||
351 | } | ||
352 | |||
328 | static ssize_t first_done_show(struct gfs2_sbd *sdp, char *buf) | 353 | static ssize_t first_done_show(struct gfs2_sbd *sdp, char *buf) |
329 | { | 354 | { |
330 | struct lm_lockstruct *ls = &sdp->sd_lockstruct; | 355 | struct lm_lockstruct *ls = &sdp->sd_lockstruct; |
@@ -352,7 +377,7 @@ static ssize_t recover_store(struct gfs2_sbd *sdp, const char *buf, size_t len) | |||
352 | list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) { | 377 | list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) { |
353 | if (jd->jd_jid != jid) | 378 | if (jd->jd_jid != jid) |
354 | continue; | 379 | continue; |
355 | rv = slow_work_enqueue(&jd->jd_work); | 380 | rv = gfs2_recover_journal(jd, false); |
356 | break; | 381 | break; |
357 | } | 382 | } |
358 | out: | 383 | out: |
@@ -377,14 +402,41 @@ static ssize_t jid_show(struct gfs2_sbd *sdp, char *buf) | |||
377 | return sprintf(buf, "%u\n", sdp->sd_lockstruct.ls_jid); | 402 | return sprintf(buf, "%u\n", sdp->sd_lockstruct.ls_jid); |
378 | } | 403 | } |
379 | 404 | ||
405 | static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len) | ||
406 | { | ||
407 | unsigned jid; | ||
408 | int rv; | ||
409 | |||
410 | rv = sscanf(buf, "%u", &jid); | ||
411 | if (rv != 1) | ||
412 | return -EINVAL; | ||
413 | |||
414 | spin_lock(&sdp->sd_jindex_spin); | ||
415 | rv = -EINVAL; | ||
416 | if (sdp->sd_args.ar_spectator) | ||
417 | goto out; | ||
418 | if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) | ||
419 | goto out; | ||
420 | rv = -EBUSY; | ||
421 | if (test_and_clear_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0) | ||
422 | goto out; | ||
423 | sdp->sd_lockstruct.ls_jid = jid; | ||
424 | smp_mb__after_clear_bit(); | ||
425 | wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID); | ||
426 | rv = 0; | ||
427 | out: | ||
428 | spin_unlock(&sdp->sd_jindex_spin); | ||
429 | return rv ? rv : len; | ||
430 | } | ||
431 | |||
380 | #define GDLM_ATTR(_name,_mode,_show,_store) \ | 432 | #define GDLM_ATTR(_name,_mode,_show,_store) \ |
381 | static struct gfs2_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store) | 433 | static struct gfs2_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store) |
382 | 434 | ||
383 | GDLM_ATTR(proto_name, 0444, proto_name_show, NULL); | 435 | GDLM_ATTR(proto_name, 0444, proto_name_show, NULL); |
384 | GDLM_ATTR(block, 0644, block_show, block_store); | 436 | GDLM_ATTR(block, 0644, block_show, block_store); |
385 | GDLM_ATTR(withdraw, 0644, withdraw_show, withdraw_store); | 437 | GDLM_ATTR(withdraw, 0644, withdraw_show, withdraw_store); |
386 | GDLM_ATTR(jid, 0444, jid_show, NULL); | 438 | GDLM_ATTR(jid, 0644, jid_show, jid_store); |
387 | GDLM_ATTR(first, 0444, lkfirst_show, NULL); | 439 | GDLM_ATTR(first, 0644, lkfirst_show, lkfirst_store); |
388 | GDLM_ATTR(first_done, 0444, first_done_show, NULL); | 440 | GDLM_ATTR(first_done, 0444, first_done_show, NULL); |
389 | GDLM_ATTR(recover, 0600, NULL, recover_store); | 441 | GDLM_ATTR(recover, 0600, NULL, recover_store); |
390 | GDLM_ATTR(recover_done, 0444, recover_done_show, NULL); | 442 | GDLM_ATTR(recover_done, 0444, recover_done_show, NULL); |
@@ -564,7 +616,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj, | |||
564 | 616 | ||
565 | add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name); | 617 | add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name); |
566 | add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name); | 618 | add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name); |
567 | if (!sdp->sd_args.ar_spectator) | 619 | if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags)) |
568 | add_uevent_var(env, "JOURNALID=%u", sdp->sd_lockstruct.ls_jid); | 620 | add_uevent_var(env, "JOURNALID=%u", sdp->sd_lockstruct.ls_jid); |
569 | if (gfs2_uuid_valid(uuid)) | 621 | if (gfs2_uuid_valid(uuid)) |
570 | add_uevent_var(env, "UUID=%pUB", uuid); | 622 | add_uevent_var(env, "UUID=%pUB", uuid); |
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 82f93da00d1b..776af6eb4bcb 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -1296,6 +1296,7 @@ fail: | |||
1296 | 1296 | ||
1297 | int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data) | 1297 | int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data) |
1298 | { | 1298 | { |
1299 | struct inode *inode = &ip->i_inode; | ||
1299 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 1300 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
1300 | struct gfs2_ea_location el; | 1301 | struct gfs2_ea_location el; |
1301 | struct buffer_head *dibh; | 1302 | struct buffer_head *dibh; |
@@ -1321,14 +1322,25 @@ int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data) | |||
1321 | return error; | 1322 | return error; |
1322 | 1323 | ||
1323 | error = gfs2_meta_inode_buffer(ip, &dibh); | 1324 | error = gfs2_meta_inode_buffer(ip, &dibh); |
1324 | if (!error) { | 1325 | if (error) |
1325 | error = inode_setattr(&ip->i_inode, attr); | 1326 | goto out_trans_end; |
1326 | gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error); | 1327 | |
1327 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); | 1328 | if ((attr->ia_valid & ATTR_SIZE) && |
1328 | gfs2_dinode_out(ip, dibh->b_data); | 1329 | attr->ia_size != i_size_read(inode)) { |
1329 | brelse(dibh); | 1330 | int error; |
1331 | |||
1332 | error = vmtruncate(inode, attr->ia_size); | ||
1333 | gfs2_assert_warn(GFS2_SB(inode), !error); | ||
1330 | } | 1334 | } |
1331 | 1335 | ||
1336 | setattr_copy(inode, attr); | ||
1337 | mark_inode_dirty(inode); | ||
1338 | |||
1339 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); | ||
1340 | gfs2_dinode_out(ip, dibh->b_data); | ||
1341 | brelse(dibh); | ||
1342 | |||
1343 | out_trans_end: | ||
1332 | gfs2_trans_end(sdp); | 1344 | gfs2_trans_end(sdp); |
1333 | return error; | 1345 | return error; |
1334 | } | 1346 | } |
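Several hunks in this merge (inode.c, ops_inode.c and the xattr.c change just above) replace inode_setattr() with the open-coded VFS sequence; stripped of the GFS2 transaction handling, the shared shape is (sketch taken from the code above):

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		error = vmtruncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);	/* copy uid/gid/mode/times into the inode */
	mark_inode_dirty(inode);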