-rw-r--r--  Documentation/filesystems/gfs2.txt |  12
-rw-r--r--  fs/gfs2/aops.c                     |   8
-rw-r--r--  fs/gfs2/bmap.c                     |  17
-rw-r--r--  fs/gfs2/dir.c                      |   2
-rw-r--r--  fs/gfs2/export.c                   |   2
-rw-r--r--  fs/gfs2/glock.c                    |   3
-rw-r--r--  fs/gfs2/incore.h                   |  11
-rw-r--r--  fs/gfs2/inode.c                    | 101
-rw-r--r--  fs/gfs2/inode.h                    |   5
-rw-r--r--  fs/gfs2/log.c                      | 158
-rw-r--r--  fs/gfs2/log.h                      |   1
-rw-r--r--  fs/gfs2/lops.c                     |   2
-rw-r--r--  fs/gfs2/main.c                     |   2
-rw-r--r--  fs/gfs2/meta_io.c                  |   5
-rw-r--r--  fs/gfs2/ops_fstype.c               |  19
-rw-r--r--  fs/gfs2/quota.c                    | 102
-rw-r--r--  fs/gfs2/rgrp.c                     |  68
-rw-r--r--  fs/gfs2/super.c                    |  11
-rw-r--r--  fs/gfs2/sys.c                      |   6
-rw-r--r--  fs/gfs2/trans.c                    |  18
20 files changed, 368 insertions, 185 deletions
diff --git a/Documentation/filesystems/gfs2.txt b/Documentation/filesystems/gfs2.txt
index 5e3ab8f3beff..0b59c0200912 100644
--- a/Documentation/filesystems/gfs2.txt
+++ b/Documentation/filesystems/gfs2.txt
@@ -1,7 +1,7 @@
| 1 | Global File System | 1 | Global File System |
| 2 | ------------------ | 2 | ------------------ |
| 3 | 3 | ||
| 4 | http://sources.redhat.com/cluster/ | 4 | http://sources.redhat.com/cluster/wiki/ |
| 5 | 5 | ||
| 6 | GFS is a cluster file system. It allows a cluster of computers to | 6 | GFS is a cluster file system. It allows a cluster of computers to |
| 7 | simultaneously use a block device that is shared between them (with FC, | 7 | simultaneously use a block device that is shared between them (with FC, |
| @@ -36,11 +36,11 @@ GFS2 is not on-disk compatible with previous versions of GFS, but it | |||
| 36 | is pretty close. | 36 | is pretty close. |
| 37 | 37 | ||
| 38 | The following man pages can be found at the URL above: | 38 | The following man pages can be found at the URL above: |
| 39 | fsck.gfs2 to repair a filesystem | 39 | fsck.gfs2 to repair a filesystem |
| 40 | gfs2_grow to expand a filesystem online | 40 | gfs2_grow to expand a filesystem online |
| 41 | gfs2_jadd to add journals to a filesystem online | 41 | gfs2_jadd to add journals to a filesystem online |
| 42 | gfs2_tool to manipulate, examine and tune a filesystem | 42 | gfs2_tool to manipulate, examine and tune a filesystem |
| 43 | gfs2_quota to examine and change quota values in a filesystem | 43 | gfs2_quota to examine and change quota values in a filesystem |
| 44 | gfs2_convert to convert a gfs filesystem to gfs2 in-place | 44 | gfs2_convert to convert a gfs filesystem to gfs2 in-place |
| 45 | mount.gfs2 to help mount(8) mount a filesystem | 45 | mount.gfs2 to help mount(8) mount a filesystem |
| 46 | mkfs.gfs2 to make a filesystem | 46 | mkfs.gfs2 to make a filesystem |
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 0c1d0b82dcf1..a739a0a48067 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -418,6 +418,7 @@ static int gfs2_jdata_writepages(struct address_space *mapping,
| 418 | static int stuffed_readpage(struct gfs2_inode *ip, struct page *page) | 418 | static int stuffed_readpage(struct gfs2_inode *ip, struct page *page) |
| 419 | { | 419 | { |
| 420 | struct buffer_head *dibh; | 420 | struct buffer_head *dibh; |
| 421 | u64 dsize = i_size_read(&ip->i_inode); | ||
| 421 | void *kaddr; | 422 | void *kaddr; |
| 422 | int error; | 423 | int error; |
| 423 | 424 | ||
@@ -437,9 +438,10 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
| 437 | return error; | 438 | return error; |
| 438 | 439 | ||
| 439 | kaddr = kmap_atomic(page, KM_USER0); | 440 | kaddr = kmap_atomic(page, KM_USER0); |
| 440 | memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), | 441 | if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode))) |
| 441 | ip->i_disksize); | 442 | dsize = (dibh->b_size - sizeof(struct gfs2_dinode)); |
| 442 | memset(kaddr + ip->i_disksize, 0, PAGE_CACHE_SIZE - ip->i_disksize); | 443 | memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize); |
| 444 | memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize); | ||
| 443 | kunmap_atomic(kaddr, KM_USER0); | 445 | kunmap_atomic(kaddr, KM_USER0); |
| 444 | flush_dcache_page(page); | 446 | flush_dcache_page(page); |
| 445 | brelse(dibh); | 447 | brelse(dibh); |
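The stuffed_readpage() hunk above, like the matching gfs2_unstuffer_page() change in bmap.c below, stops copying i_disksize bytes blindly and instead clamps the copy to the data that can physically follow the dinode header in the block. A minimal userspace sketch of that clamp follows; the block and header sizes are made-up stand-ins, not the real on-disk values.

```c
/*
 * Illustrative only: limit a stuffed inode's advertised size to the bytes
 * that can actually fit after the dinode header in one block, as the two
 * hunks do with dsize before the memcpy()/memset().
 */
#include <stdint.h>
#include <stdio.h>

#define BLOCK_SIZE   4096u   /* stand-in for dibh->b_size */
#define DINODE_SIZE   232u   /* stand-in for sizeof(struct gfs2_dinode) */

static uint64_t clamp_stuffed_size(uint64_t isize)
{
    uint64_t max = BLOCK_SIZE - DINODE_SIZE;  /* bytes a stuffed inode can hold */

    return isize > max ? max : isize;         /* never copy past the block end */
}

int main(void)
{
    printf("%llu\n", (unsigned long long)clamp_stuffed_size(1000));      /* 1000 */
    printf("%llu\n", (unsigned long long)clamp_stuffed_size(1u << 20));  /* 3864 */
    return 0;
}
```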
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 5e411d5f4697..4a48c0f4b402 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -71,11 +71,13 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
| 71 | 71 | ||
| 72 | if (!PageUptodate(page)) { | 72 | if (!PageUptodate(page)) { |
| 73 | void *kaddr = kmap(page); | 73 | void *kaddr = kmap(page); |
| 74 | u64 dsize = i_size_read(inode); | ||
| 75 | |||
| 76 | if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode))) | ||
| 77 | dsize = dibh->b_size - sizeof(struct gfs2_dinode); | ||
| 74 | 78 | ||
| 75 | memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), | 79 | memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize); |
| 76 | ip->i_disksize); | 80 | memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize); |
| 77 | memset(kaddr + ip->i_disksize, 0, | ||
| 78 | PAGE_CACHE_SIZE - ip->i_disksize); | ||
| 79 | kunmap(page); | 81 | kunmap(page); |
| 80 | 82 | ||
| 81 | SetPageUptodate(page); | 83 | SetPageUptodate(page); |
@@ -1038,13 +1040,14 @@ static int trunc_start(struct gfs2_inode *ip, u64 size)
| 1038 | goto out; | 1040 | goto out; |
| 1039 | 1041 | ||
| 1040 | if (gfs2_is_stuffed(ip)) { | 1042 | if (gfs2_is_stuffed(ip)) { |
| 1041 | ip->i_disksize = size; | 1043 | u64 dsize = size + sizeof(struct gfs2_inode); |
| 1042 | ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; | 1044 | ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; |
| 1043 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); | 1045 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); |
| 1044 | gfs2_dinode_out(ip, dibh->b_data); | 1046 | gfs2_dinode_out(ip, dibh->b_data); |
| 1045 | gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + size); | 1047 | if (dsize > dibh->b_size) |
| 1048 | dsize = dibh->b_size; | ||
| 1049 | gfs2_buffer_clear_tail(dibh, dsize); | ||
| 1046 | error = 1; | 1050 | error = 1; |
| 1047 | |||
| 1048 | } else { | 1051 | } else { |
| 1049 | if (size & (u64)(sdp->sd_sb.sb_bsize - 1)) | 1052 | if (size & (u64)(sdp->sd_sb.sb_bsize - 1)) |
| 1050 | error = gfs2_block_truncate_page(ip->i_inode.i_mapping); | 1053 | error = gfs2_block_truncate_page(ip->i_inode.i_mapping); |
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 25fddc100f18..8295c5b5d4a9 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1475,7 +1475,7 @@ struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name)
| 1475 | inode = gfs2_inode_lookup(dir->i_sb, | 1475 | inode = gfs2_inode_lookup(dir->i_sb, |
| 1476 | be16_to_cpu(dent->de_type), | 1476 | be16_to_cpu(dent->de_type), |
| 1477 | be64_to_cpu(dent->de_inum.no_addr), | 1477 | be64_to_cpu(dent->de_inum.no_addr), |
| 1478 | be64_to_cpu(dent->de_inum.no_formal_ino), 0); | 1478 | be64_to_cpu(dent->de_inum.no_formal_ino)); |
| 1479 | brelse(bh); | 1479 | brelse(bh); |
| 1480 | return inode; | 1480 | return inode; |
| 1481 | } | 1481 | } |
diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
index c22c21174833..dfe237a3f8ad 100644
--- a/fs/gfs2/export.c
+++ b/fs/gfs2/export.c
@@ -168,7 +168,7 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb,
| 168 | if (error) | 168 | if (error) |
| 169 | goto fail; | 169 | goto fail; |
| 170 | 170 | ||
| 171 | inode = gfs2_inode_lookup(sb, DT_UNKNOWN, inum->no_addr, 0, 0); | 171 | inode = gfs2_inode_lookup(sb, DT_UNKNOWN, inum->no_addr, 0); |
| 172 | if (IS_ERR(inode)) { | 172 | if (IS_ERR(inode)) { |
| 173 | error = PTR_ERR(inode); | 173 | error = PTR_ERR(inode); |
| 174 | goto fail; | 174 | goto fail; |
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 454d4b4eb36b..ddcdbf493536 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -855,6 +855,9 @@ void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *
| 855 | gh->gh_flags = flags; | 855 | gh->gh_flags = flags; |
| 856 | gh->gh_iflags = 0; | 856 | gh->gh_iflags = 0; |
| 857 | gh->gh_ip = (unsigned long)__builtin_return_address(0); | 857 | gh->gh_ip = (unsigned long)__builtin_return_address(0); |
| 858 | if (gh->gh_owner_pid) | ||
| 859 | put_pid(gh->gh_owner_pid); | ||
| 860 | gh->gh_owner_pid = get_pid(task_pid(current)); | ||
| 858 | } | 861 | } |
| 859 | 862 | ||
| 860 | /** | 863 | /** |
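The glock.c hunk above keeps gfs2_holder_reinit() from leaking a pid reference: the reference the holder already owns is dropped before a new one is taken for the current task. A tiny userspace sketch of that put-before-get rule is below; the refcount struct stands in for struct pid and get_pid()/put_pid().

```c
/*
 * Illustrative only: when a holder is re-initialised, release the
 * reference it already holds before pinning the new owner, otherwise the
 * old reference is leaked on every reinit.
 */
#include <stdio.h>

struct ref {
    int count;
};

static struct ref *ref_get(struct ref *r) { r->count++; return r; }
static void ref_put(struct ref *r)        { r->count--; }

struct holder {
    struct ref *owner;                  /* stand-in for gh->gh_owner_pid */
};

static void holder_reinit(struct holder *h, struct ref *current_task)
{
    if (h->owner)                       /* drop the reference taken last time... */
        ref_put(h->owner);
    h->owner = ref_get(current_task);   /* ...before taking the new one */
}

int main(void)
{
    struct ref task = { 1 };
    struct holder h = { 0 };

    holder_reinit(&h, &task);
    holder_reinit(&h, &task);           /* without the put, count would be 3 */
    printf("task refcount: %d\n", task.count);   /* prints 2 */
    return 0;
}
```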
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 3aac46f6853e..b5d7363b22da 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -439,9 +439,6 @@ struct gfs2_args {
| 439 | struct gfs2_tune { | 439 | struct gfs2_tune { |
| 440 | spinlock_t gt_spin; | 440 | spinlock_t gt_spin; |
| 441 | 441 | ||
| 442 | unsigned int gt_incore_log_blocks; | ||
| 443 | unsigned int gt_log_flush_secs; | ||
| 444 | |||
| 445 | unsigned int gt_logd_secs; | 442 | unsigned int gt_logd_secs; |
| 446 | 443 | ||
| 447 | unsigned int gt_quota_simul_sync; /* Max quotavals to sync at once */ | 444 | unsigned int gt_quota_simul_sync; /* Max quotavals to sync at once */ |
@@ -462,6 +459,7 @@ enum {
| 462 | SDF_SHUTDOWN = 2, | 459 | SDF_SHUTDOWN = 2, |
| 463 | SDF_NOBARRIERS = 3, | 460 | SDF_NOBARRIERS = 3, |
| 464 | SDF_NORECOVERY = 4, | 461 | SDF_NORECOVERY = 4, |
| 462 | SDF_DEMOTE = 5, | ||
| 465 | }; | 463 | }; |
| 466 | 464 | ||
| 467 | #define GFS2_FSNAME_LEN 256 | 465 | #define GFS2_FSNAME_LEN 256 |
@@ -618,6 +616,7 @@ struct gfs2_sbd {
| 618 | unsigned int sd_log_commited_databuf; | 616 | unsigned int sd_log_commited_databuf; |
| 619 | int sd_log_commited_revoke; | 617 | int sd_log_commited_revoke; |
| 620 | 618 | ||
| 619 | atomic_t sd_log_pinned; | ||
| 621 | unsigned int sd_log_num_buf; | 620 | unsigned int sd_log_num_buf; |
| 622 | unsigned int sd_log_num_revoke; | 621 | unsigned int sd_log_num_revoke; |
| 623 | unsigned int sd_log_num_rg; | 622 | unsigned int sd_log_num_rg; |
@@ -629,15 +628,17 @@ struct gfs2_sbd {
| 629 | struct list_head sd_log_le_databuf; | 628 | struct list_head sd_log_le_databuf; |
| 630 | struct list_head sd_log_le_ordered; | 629 | struct list_head sd_log_le_ordered; |
| 631 | 630 | ||
| 631 | atomic_t sd_log_thresh1; | ||
| 632 | atomic_t sd_log_thresh2; | ||
| 632 | atomic_t sd_log_blks_free; | 633 | atomic_t sd_log_blks_free; |
| 633 | struct mutex sd_log_reserve_mutex; | 634 | wait_queue_head_t sd_log_waitq; |
| 635 | wait_queue_head_t sd_logd_waitq; | ||
| 634 | 636 | ||
| 635 | u64 sd_log_sequence; | 637 | u64 sd_log_sequence; |
| 636 | unsigned int sd_log_head; | 638 | unsigned int sd_log_head; |
| 637 | unsigned int sd_log_tail; | 639 | unsigned int sd_log_tail; |
| 638 | int sd_log_idle; | 640 | int sd_log_idle; |
| 639 | 641 | ||
| 640 | unsigned long sd_log_flush_time; | ||
| 641 | struct rw_semaphore sd_log_flush_lock; | 642 | struct rw_semaphore sd_log_flush_lock; |
| 642 | atomic_t sd_log_in_flight; | 643 | atomic_t sd_log_in_flight; |
| 643 | wait_queue_head_t sd_log_flush_wait; | 644 | wait_queue_head_t sd_log_flush_wait; |
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index b1bf2694fb2b..51d8061fa07a 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -158,7 +158,6 @@ void gfs2_set_iop(struct inode *inode)
| 158 | * @sb: The super block | 158 | * @sb: The super block |
| 159 | * @no_addr: The inode number | 159 | * @no_addr: The inode number |
| 160 | * @type: The type of the inode | 160 | * @type: The type of the inode |
| 161 | * @skip_freeing: set this not return an inode if it is currently being freed. | ||
| 162 | * | 161 | * |
| 163 | * Returns: A VFS inode, or an error | 162 | * Returns: A VFS inode, or an error |
| 164 | */ | 163 | */ |
@@ -166,17 +165,14 @@ void gfs2_set_iop(struct inode *inode)
| 166 | struct inode *gfs2_inode_lookup(struct super_block *sb, | 165 | struct inode *gfs2_inode_lookup(struct super_block *sb, |
| 167 | unsigned int type, | 166 | unsigned int type, |
| 168 | u64 no_addr, | 167 | u64 no_addr, |
| 169 | u64 no_formal_ino, int skip_freeing) | 168 | u64 no_formal_ino) |
| 170 | { | 169 | { |
| 171 | struct inode *inode; | 170 | struct inode *inode; |
| 172 | struct gfs2_inode *ip; | 171 | struct gfs2_inode *ip; |
| 173 | struct gfs2_glock *io_gl; | 172 | struct gfs2_glock *io_gl; |
| 174 | int error; | 173 | int error; |
| 175 | 174 | ||
| 176 | if (skip_freeing) | 175 | inode = gfs2_iget(sb, no_addr); |
| 177 | inode = gfs2_iget_skip(sb, no_addr); | ||
| 178 | else | ||
| 179 | inode = gfs2_iget(sb, no_addr); | ||
| 180 | ip = GFS2_I(inode); | 176 | ip = GFS2_I(inode); |
| 181 | 177 | ||
| 182 | if (!inode) | 178 | if (!inode) |
@@ -234,13 +230,100 @@ fail_glock:
| 234 | fail_iopen: | 230 | fail_iopen: |
| 235 | gfs2_glock_put(io_gl); | 231 | gfs2_glock_put(io_gl); |
| 236 | fail_put: | 232 | fail_put: |
| 237 | ip->i_gl->gl_object = NULL; | 233 | if (inode->i_state & I_NEW) |
| 234 | ip->i_gl->gl_object = NULL; | ||
| 238 | gfs2_glock_put(ip->i_gl); | 235 | gfs2_glock_put(ip->i_gl); |
| 239 | fail: | 236 | fail: |
| 240 | iget_failed(inode); | 237 | if (inode->i_state & I_NEW) |
| 238 | iget_failed(inode); | ||
| 239 | else | ||
| 240 | iput(inode); | ||
| 241 | return ERR_PTR(error); | 241 | return ERR_PTR(error); |
| 242 | } | 242 | } |
| 243 | 243 | ||
| 244 | /** | ||
| 245 | * gfs2_unlinked_inode_lookup - Lookup an unlinked inode for reclamation | ||
| 246 | * @sb: The super block | ||
| 247 | * no_addr: The inode number | ||
| 248 | * @@inode: A pointer to the inode found, if any | ||
| 249 | * | ||
| 250 | * Returns: 0 and *inode if no errors occurred. If an error occurs, | ||
| 251 | * the resulting *inode may or may not be NULL. | ||
| 252 | */ | ||
| 253 | |||
| 254 | int gfs2_unlinked_inode_lookup(struct super_block *sb, u64 no_addr, | ||
| 255 | struct inode **inode) | ||
| 256 | { | ||
| 257 | struct gfs2_sbd *sdp; | ||
| 258 | struct gfs2_inode *ip; | ||
| 259 | struct gfs2_glock *io_gl; | ||
| 260 | int error; | ||
| 261 | struct gfs2_holder gh; | ||
| 262 | |||
| 263 | *inode = gfs2_iget_skip(sb, no_addr); | ||
| 264 | |||
| 265 | if (!(*inode)) | ||
| 266 | return -ENOBUFS; | ||
| 267 | |||
| 268 | if (!((*inode)->i_state & I_NEW)) | ||
| 269 | return -ENOBUFS; | ||
| 270 | |||
| 271 | ip = GFS2_I(*inode); | ||
| 272 | sdp = GFS2_SB(*inode); | ||
| 273 | ip->i_no_formal_ino = -1; | ||
| 274 | |||
| 275 | error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl); | ||
| 276 | if (unlikely(error)) | ||
| 277 | goto fail; | ||
| 278 | ip->i_gl->gl_object = ip; | ||
| 279 | |||
| 280 | error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl); | ||
| 281 | if (unlikely(error)) | ||
| 282 | goto fail_put; | ||
| 283 | |||
| 284 | set_bit(GIF_INVALID, &ip->i_flags); | ||
| 285 | error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, LM_FLAG_TRY | GL_EXACT, | ||
| 286 | &ip->i_iopen_gh); | ||
| 287 | if (unlikely(error)) { | ||
| 288 | if (error == GLR_TRYFAILED) | ||
| 289 | error = 0; | ||
| 290 | goto fail_iopen; | ||
| 291 | } | ||
| 292 | ip->i_iopen_gh.gh_gl->gl_object = ip; | ||
| 293 | gfs2_glock_put(io_gl); | ||
| 294 | |||
| 295 | (*inode)->i_mode = DT2IF(DT_UNKNOWN); | ||
| 296 | |||
| 297 | /* | ||
| 298 | * We must read the inode in order to work out its type in | ||
| 299 | * this case. Note that this doesn't happen often as we normally | ||
| 300 | * know the type beforehand. This code path only occurs during | ||
| 301 | * unlinked inode recovery (where it is safe to do this glock, | ||
| 302 | * which is not true in the general case). | ||
| 303 | */ | ||
| 304 | error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, LM_FLAG_TRY, | ||
| 305 | &gh); | ||
| 306 | if (unlikely(error)) { | ||
| 307 | if (error == GLR_TRYFAILED) | ||
| 308 | error = 0; | ||
| 309 | goto fail_glock; | ||
| 310 | } | ||
| 311 | /* Inode is now uptodate */ | ||
| 312 | gfs2_glock_dq_uninit(&gh); | ||
| 313 | gfs2_set_iop(*inode); | ||
| 314 | |||
| 315 | return 0; | ||
| 316 | fail_glock: | ||
| 317 | gfs2_glock_dq(&ip->i_iopen_gh); | ||
| 318 | fail_iopen: | ||
| 319 | gfs2_glock_put(io_gl); | ||
| 320 | fail_put: | ||
| 321 | ip->i_gl->gl_object = NULL; | ||
| 322 | gfs2_glock_put(ip->i_gl); | ||
| 323 | fail: | ||
| 324 | return error; | ||
| 325 | } | ||
| 326 | |||
| 244 | static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) | 327 | static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) |
| 245 | { | 328 | { |
| 246 | const struct gfs2_dinode *str = buf; | 329 | const struct gfs2_dinode *str = buf; |
@@ -862,7 +945,7 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
| 862 | goto fail_gunlock2; | 945 | goto fail_gunlock2; |
| 863 | 946 | ||
| 864 | inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode), inum.no_addr, | 947 | inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode), inum.no_addr, |
| 865 | inum.no_formal_ino, 0); | 948 | inum.no_formal_ino); |
| 866 | if (IS_ERR(inode)) | 949 | if (IS_ERR(inode)) |
| 867 | goto fail_gunlock2; | 950 | goto fail_gunlock2; |
| 868 | 951 | ||
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index c341aaf67adb..e161461d4c57 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -83,8 +83,9 @@ static inline void gfs2_inum_out(const struct gfs2_inode *ip,
| 83 | 83 | ||
| 84 | extern void gfs2_set_iop(struct inode *inode); | 84 | extern void gfs2_set_iop(struct inode *inode); |
| 85 | extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type, | 85 | extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type, |
| 86 | u64 no_addr, u64 no_formal_ino, | 86 | u64 no_addr, u64 no_formal_ino); |
| 87 | int skip_freeing); | 87 | extern int gfs2_unlinked_inode_lookup(struct super_block *sb, u64 no_addr, |
| 88 | struct inode **inode); | ||
| 88 | extern struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr); | 89 | extern struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr); |
| 89 | 90 | ||
| 90 | extern int gfs2_inode_refresh(struct gfs2_inode *ip); | 91 | extern int gfs2_inode_refresh(struct gfs2_inode *ip); |
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index e5bf4b59d46e..b593f0e28f25 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -168,12 +168,11 @@ static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int fl
| 168 | return list_empty(&ai->ai_ail1_list); | 168 | return list_empty(&ai->ai_ail1_list); |
| 169 | } | 169 | } |
| 170 | 170 | ||
| 171 | static void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags) | 171 | static void gfs2_ail1_start(struct gfs2_sbd *sdp) |
| 172 | { | 172 | { |
| 173 | struct list_head *head; | 173 | struct list_head *head; |
| 174 | u64 sync_gen; | 174 | u64 sync_gen; |
| 175 | struct list_head *first; | 175 | struct gfs2_ail *ai; |
| 176 | struct gfs2_ail *first_ai, *ai, *tmp; | ||
| 177 | int done = 0; | 176 | int done = 0; |
| 178 | 177 | ||
| 179 | gfs2_log_lock(sdp); | 178 | gfs2_log_lock(sdp); |
@@ -184,21 +183,9 @@ static void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
| 184 | } | 183 | } |
| 185 | sync_gen = sdp->sd_ail_sync_gen++; | 184 | sync_gen = sdp->sd_ail_sync_gen++; |
| 186 | 185 | ||
| 187 | first = head->prev; | ||
| 188 | first_ai = list_entry(first, struct gfs2_ail, ai_list); | ||
| 189 | first_ai->ai_sync_gen = sync_gen; | ||
| 190 | gfs2_ail1_start_one(sdp, first_ai); /* This may drop log lock */ | ||
| 191 | |||
| 192 | if (flags & DIO_ALL) | ||
| 193 | first = NULL; | ||
| 194 | |||
| 195 | while(!done) { | 186 | while(!done) { |
| 196 | if (first && (head->prev != first || | ||
| 197 | gfs2_ail1_empty_one(sdp, first_ai, 0))) | ||
| 198 | break; | ||
| 199 | |||
| 200 | done = 1; | 187 | done = 1; |
| 201 | list_for_each_entry_safe_reverse(ai, tmp, head, ai_list) { | 188 | list_for_each_entry_reverse(ai, head, ai_list) { |
| 202 | if (ai->ai_sync_gen >= sync_gen) | 189 | if (ai->ai_sync_gen >= sync_gen) |
| 203 | continue; | 190 | continue; |
| 204 | ai->ai_sync_gen = sync_gen; | 191 | ai->ai_sync_gen = sync_gen; |
@@ -290,58 +277,57 @@ static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
| 290 | * flush time, so we ensure that we have just enough free blocks at all | 277 | * flush time, so we ensure that we have just enough free blocks at all |
| 291 | * times to avoid running out during a log flush. | 278 | * times to avoid running out during a log flush. |
| 292 | * | 279 | * |
| 280 | * We no longer flush the log here, instead we wake up logd to do that | ||
| 281 | * for us. To avoid the thundering herd and to ensure that we deal fairly | ||
| 282 | * with queued waiters, we use an exclusive wait. This means that when we | ||
| 283 | * get woken with enough journal space to get our reservation, we need to | ||
| 284 | * wake the next waiter on the list. | ||
| 285 | * | ||
| 293 | * Returns: errno | 286 | * Returns: errno |
| 294 | */ | 287 | */ |
| 295 | 288 | ||
| 296 | int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks) | 289 | int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks) |
| 297 | { | 290 | { |
| 298 | unsigned int try = 0; | ||
| 299 | unsigned reserved_blks = 6 * (4096 / sdp->sd_vfs->s_blocksize); | 291 | unsigned reserved_blks = 6 * (4096 / sdp->sd_vfs->s_blocksize); |
| 292 | unsigned wanted = blks + reserved_blks; | ||
| 293 | DEFINE_WAIT(wait); | ||
| 294 | int did_wait = 0; | ||
| 295 | unsigned int free_blocks; | ||
| 300 | 296 | ||
| 301 | if (gfs2_assert_warn(sdp, blks) || | 297 | if (gfs2_assert_warn(sdp, blks) || |
| 302 | gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks)) | 298 | gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks)) |
| 303 | return -EINVAL; | 299 | return -EINVAL; |
| 304 | 300 | retry: | |
| 305 | mutex_lock(&sdp->sd_log_reserve_mutex); | 301 | free_blocks = atomic_read(&sdp->sd_log_blks_free); |
| 306 | gfs2_log_lock(sdp); | 302 | if (unlikely(free_blocks <= wanted)) { |
| 307 | while(atomic_read(&sdp->sd_log_blks_free) <= (blks + reserved_blks)) { | 303 | do { |
| 308 | gfs2_log_unlock(sdp); | 304 | prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait, |
| 309 | gfs2_ail1_empty(sdp, 0); | 305 | TASK_UNINTERRUPTIBLE); |
| 310 | gfs2_log_flush(sdp, NULL); | 306 | wake_up(&sdp->sd_logd_waitq); |
| 311 | 307 | did_wait = 1; | |
| 312 | if (try++) | 308 | if (atomic_read(&sdp->sd_log_blks_free) <= wanted) |
| 313 | gfs2_ail1_start(sdp, 0); | 309 | io_schedule(); |
| 314 | gfs2_log_lock(sdp); | 310 | free_blocks = atomic_read(&sdp->sd_log_blks_free); |
| 311 | } while(free_blocks <= wanted); | ||
| 312 | finish_wait(&sdp->sd_log_waitq, &wait); | ||
| 315 | } | 313 | } |
| 316 | atomic_sub(blks, &sdp->sd_log_blks_free); | 314 | if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks, |
| 315 | free_blocks - blks) != free_blocks) | ||
| 316 | goto retry; | ||
| 317 | trace_gfs2_log_blocks(sdp, -blks); | 317 | trace_gfs2_log_blocks(sdp, -blks); |
| 318 | gfs2_log_unlock(sdp); | 318 | |
| 319 | mutex_unlock(&sdp->sd_log_reserve_mutex); | 319 | /* |
| 320 | * If we waited, then so might others, wake them up _after_ we get | ||
| 321 | * our share of the log. | ||
| 322 | */ | ||
| 323 | if (unlikely(did_wait)) | ||
| 324 | wake_up(&sdp->sd_log_waitq); | ||
| 320 | 325 | ||
| 321 | down_read(&sdp->sd_log_flush_lock); | 326 | down_read(&sdp->sd_log_flush_lock); |
| 322 | 327 | ||
| 323 | return 0; | 328 | return 0; |
| 324 | } | 329 | } |
| 325 | 330 | ||
| 326 | /** | ||
| 327 | * gfs2_log_release - Release a given number of log blocks | ||
| 328 | * @sdp: The GFS2 superblock | ||
| 329 | * @blks: The number of blocks | ||
| 330 | * | ||
| 331 | */ | ||
| 332 | |||
| 333 | void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks) | ||
| 334 | { | ||
| 335 | |||
| 336 | gfs2_log_lock(sdp); | ||
| 337 | atomic_add(blks, &sdp->sd_log_blks_free); | ||
| 338 | trace_gfs2_log_blocks(sdp, blks); | ||
| 339 | gfs2_assert_withdraw(sdp, | ||
| 340 | atomic_read(&sdp->sd_log_blks_free) <= sdp->sd_jdesc->jd_blocks); | ||
| 341 | gfs2_log_unlock(sdp); | ||
| 342 | up_read(&sdp->sd_log_flush_lock); | ||
| 343 | } | ||
| 344 | |||
| 345 | static u64 log_bmap(struct gfs2_sbd *sdp, unsigned int lbn) | 331 | static u64 log_bmap(struct gfs2_sbd *sdp, unsigned int lbn) |
| 346 | { | 332 | { |
| 347 | struct gfs2_journal_extent *je; | 333 | struct gfs2_journal_extent *je; |
@@ -559,11 +545,10 @@ static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
| 559 | 545 | ||
| 560 | ail2_empty(sdp, new_tail); | 546 | ail2_empty(sdp, new_tail); |
| 561 | 547 | ||
| 562 | gfs2_log_lock(sdp); | ||
| 563 | atomic_add(dist, &sdp->sd_log_blks_free); | 548 | atomic_add(dist, &sdp->sd_log_blks_free); |
| 564 | trace_gfs2_log_blocks(sdp, dist); | 549 | trace_gfs2_log_blocks(sdp, dist); |
| 565 | gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <= sdp->sd_jdesc->jd_blocks); | 550 | gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <= |
| 566 | gfs2_log_unlock(sdp); | 551 | sdp->sd_jdesc->jd_blocks); |
| 567 | 552 | ||
| 568 | sdp->sd_log_tail = new_tail; | 553 | sdp->sd_log_tail = new_tail; |
| 569 | } | 554 | } |
@@ -615,6 +600,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
| 615 | if (buffer_eopnotsupp(bh)) { | 600 | if (buffer_eopnotsupp(bh)) { |
| 616 | clear_buffer_eopnotsupp(bh); | 601 | clear_buffer_eopnotsupp(bh); |
| 617 | set_buffer_uptodate(bh); | 602 | set_buffer_uptodate(bh); |
| 603 | fs_info(sdp, "barrier sync failed - disabling barriers\n"); | ||
| 618 | set_bit(SDF_NOBARRIERS, &sdp->sd_flags); | 604 | set_bit(SDF_NOBARRIERS, &sdp->sd_flags); |
| 619 | lock_buffer(bh); | 605 | lock_buffer(bh); |
| 620 | skip_barrier: | 606 | skip_barrier: |
@@ -822,6 +808,13 @@ static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
| 822 | * @sdp: the filesystem | 808 | * @sdp: the filesystem |
| 823 | * @tr: the transaction | 809 | * @tr: the transaction |
| 824 | * | 810 | * |
| 811 | * We wake up gfs2_logd if the number of pinned blocks exceed thresh1 | ||
| 812 | * or the total number of used blocks (pinned blocks plus AIL blocks) | ||
| 813 | * is greater than thresh2. | ||
| 814 | * | ||
| 815 | * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of | ||
| 816 | * journal size. | ||
| 817 | * | ||
| 825 | * Returns: errno | 818 | * Returns: errno |
| 826 | */ | 819 | */ |
| 827 | 820 | ||
@@ -832,10 +825,10 @@ void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
| 832 | 825 | ||
| 833 | up_read(&sdp->sd_log_flush_lock); | 826 | up_read(&sdp->sd_log_flush_lock); |
| 834 | 827 | ||
| 835 | gfs2_log_lock(sdp); | 828 | if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) || |
| 836 | if (sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks)) | 829 | ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) > |
| 837 | wake_up_process(sdp->sd_logd_process); | 830 | atomic_read(&sdp->sd_log_thresh2))) |
| 838 | gfs2_log_unlock(sdp); | 831 | wake_up(&sdp->sd_logd_waitq); |
| 839 | } | 832 | } |
| 840 | 833 | ||
| 841 | /** | 834 | /** |
@@ -882,13 +875,23 @@ void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
| 882 | { | 875 | { |
| 883 | gfs2_log_flush(sdp, NULL); | 876 | gfs2_log_flush(sdp, NULL); |
| 884 | for (;;) { | 877 | for (;;) { |
| 885 | gfs2_ail1_start(sdp, DIO_ALL); | 878 | gfs2_ail1_start(sdp); |
| 886 | if (gfs2_ail1_empty(sdp, DIO_ALL)) | 879 | if (gfs2_ail1_empty(sdp, DIO_ALL)) |
| 887 | break; | 880 | break; |
| 888 | msleep(10); | 881 | msleep(10); |
| 889 | } | 882 | } |
| 890 | } | 883 | } |
| 891 | 884 | ||
| 885 | static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp) | ||
| 886 | { | ||
| 887 | return (atomic_read(&sdp->sd_log_pinned) >= atomic_read(&sdp->sd_log_thresh1)); | ||
| 888 | } | ||
| 889 | |||
| 890 | static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp) | ||
| 891 | { | ||
| 892 | unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free); | ||
| 893 | return used_blocks >= atomic_read(&sdp->sd_log_thresh2); | ||
| 894 | } | ||
| 892 | 895 | ||
| 893 | /** | 896 | /** |
| 894 | * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks | 897 | * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks |
@@ -901,28 +904,43 @@ void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
| 901 | int gfs2_logd(void *data) | 904 | int gfs2_logd(void *data) |
| 902 | { | 905 | { |
| 903 | struct gfs2_sbd *sdp = data; | 906 | struct gfs2_sbd *sdp = data; |
| 904 | unsigned long t; | 907 | unsigned long t = 1; |
| 905 | int need_flush; | 908 | DEFINE_WAIT(wait); |
| 909 | unsigned preflush; | ||
| 906 | 910 | ||
| 907 | while (!kthread_should_stop()) { | 911 | while (!kthread_should_stop()) { |
| 908 | /* Advance the log tail */ | ||
| 909 | 912 | ||
| 910 | t = sdp->sd_log_flush_time + | 913 | preflush = atomic_read(&sdp->sd_log_pinned); |
| 911 | gfs2_tune_get(sdp, gt_log_flush_secs) * HZ; | 914 | if (gfs2_jrnl_flush_reqd(sdp) || t == 0) { |
| 915 | gfs2_ail1_empty(sdp, DIO_ALL); | ||
| 916 | gfs2_log_flush(sdp, NULL); | ||
| 917 | gfs2_ail1_empty(sdp, DIO_ALL); | ||
| 918 | } | ||
| 912 | 919 | ||
| 913 | gfs2_ail1_empty(sdp, DIO_ALL); | 920 | if (gfs2_ail_flush_reqd(sdp)) { |
| 914 | gfs2_log_lock(sdp); | 921 | gfs2_ail1_start(sdp); |
| 915 | need_flush = sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks); | 922 | io_schedule(); |
| 916 | gfs2_log_unlock(sdp); | 923 | gfs2_ail1_empty(sdp, 0); |
| 917 | if (need_flush || time_after_eq(jiffies, t)) { | ||
| 918 | gfs2_log_flush(sdp, NULL); | 924 | gfs2_log_flush(sdp, NULL); |
| 919 | sdp->sd_log_flush_time = jiffies; | 925 | gfs2_ail1_empty(sdp, DIO_ALL); |
| 920 | } | 926 | } |
| 921 | 927 | ||
| 928 | wake_up(&sdp->sd_log_waitq); | ||
| 922 | t = gfs2_tune_get(sdp, gt_logd_secs) * HZ; | 929 | t = gfs2_tune_get(sdp, gt_logd_secs) * HZ; |
| 923 | if (freezing(current)) | 930 | if (freezing(current)) |
| 924 | refrigerator(); | 931 | refrigerator(); |
| 925 | schedule_timeout_interruptible(t); | 932 | |
| 933 | do { | ||
| 934 | prepare_to_wait(&sdp->sd_logd_waitq, &wait, | ||
| 935 | TASK_UNINTERRUPTIBLE); | ||
| 936 | if (!gfs2_ail_flush_reqd(sdp) && | ||
| 937 | !gfs2_jrnl_flush_reqd(sdp) && | ||
| 938 | !kthread_should_stop()) | ||
| 939 | t = schedule_timeout(t); | ||
| 940 | } while(t && !gfs2_ail_flush_reqd(sdp) && | ||
| 941 | !gfs2_jrnl_flush_reqd(sdp) && | ||
| 942 | !kthread_should_stop()); | ||
| 943 | finish_wait(&sdp->sd_logd_waitq, &wait); | ||
| 926 | } | 944 | } |
| 927 | 945 | ||
| 928 | return 0; | 946 | return 0; |
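The gfs2_log_reserve() rewrite above replaces the mutex-and-flush loop with a lock-free reservation: read sd_log_blks_free, park on sd_log_waitq (an exclusive wait that wakes gfs2_logd) if space is short, then claim the blocks with atomic_cmpxchg and retry if another reserver raced in. A rough userspace sketch of the claim step, using C11 atomics, is below; the names and the simple failure return in place of the wait queue are illustrative only.

```c
/*
 * Illustrative only: the compare-and-swap claim used by the new
 * gfs2_log_reserve() fast path, reduced to userspace C11 atomics.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint log_free_blocks = 1024;    /* stand-in for sd_log_blks_free */

static bool log_reserve(unsigned int blks)
{
    unsigned int avail = atomic_load(&log_free_blocks);

    for (;;) {
        if (avail < blks)
            return false;    /* kernel: sleep on sd_log_waitq, then retry */
        /* Claim the blocks only if nobody changed the count under us. */
        if (atomic_compare_exchange_weak(&log_free_blocks, &avail,
                                         avail - blks))
            return true;
        /* CAS failed: "avail" was reloaded with the current value, retry. */
    }
}

int main(void)
{
    printf("reserved: %d, free now: %u\n", log_reserve(100),
           atomic_load(&log_free_blocks));
    return 0;
}
```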
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
index 7c64510ccfd2..eb570b4ad443 100644
--- a/fs/gfs2/log.h
+++ b/fs/gfs2/log.h
@@ -51,7 +51,6 @@ unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
| 51 | unsigned int ssize); | 51 | unsigned int ssize); |
| 52 | 52 | ||
| 53 | int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks); | 53 | int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks); |
| 54 | void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks); | ||
| 55 | void gfs2_log_incr_head(struct gfs2_sbd *sdp); | 54 | void gfs2_log_incr_head(struct gfs2_sbd *sdp); |
| 56 | 55 | ||
| 57 | struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp); | 56 | struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp); |
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index adc260fbea90..bf33f822058d 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -54,6 +54,7 @@ static void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
| 54 | if (bd->bd_ail) | 54 | if (bd->bd_ail) |
| 55 | list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list); | 55 | list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list); |
| 56 | get_bh(bh); | 56 | get_bh(bh); |
| 57 | atomic_inc(&sdp->sd_log_pinned); | ||
| 57 | trace_gfs2_pin(bd, 1); | 58 | trace_gfs2_pin(bd, 1); |
| 58 | } | 59 | } |
| 59 | 60 | ||
| @@ -94,6 +95,7 @@ static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh, | |||
| 94 | trace_gfs2_pin(bd, 0); | 95 | trace_gfs2_pin(bd, 0); |
| 95 | gfs2_log_unlock(sdp); | 96 | gfs2_log_unlock(sdp); |
| 96 | unlock_buffer(bh); | 97 | unlock_buffer(bh); |
| 98 | atomic_dec(&sdp->sd_log_pinned); | ||
| 97 | } | 99 | } |
| 98 | 100 | ||
| 99 | 101 | ||
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index a88fadc704bb..fb2a5f93b7c3 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -94,7 +94,7 @@ static int __init init_gfs2_fs(void)
| 94 | if (!gfs2_glock_cachep) | 94 | if (!gfs2_glock_cachep) |
| 95 | goto fail; | 95 | goto fail; |
| 96 | 96 | ||
| 97 | gfs2_glock_aspace_cachep = kmem_cache_create("gfs2_glock (aspace)", | 97 | gfs2_glock_aspace_cachep = kmem_cache_create("gfs2_glock(aspace)", |
| 98 | sizeof(struct gfs2_glock) + | 98 | sizeof(struct gfs2_glock) + |
| 99 | sizeof(struct address_space), | 99 | sizeof(struct address_space), |
| 100 | 0, 0, gfs2_init_gl_aspace_once); | 100 | 0, 0, gfs2_init_gl_aspace_once); |
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 0bb12c80937a..18176d0b75d7 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -34,7 +34,6 @@
| 34 | 34 | ||
| 35 | static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc) | 35 | static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc) |
| 36 | { | 36 | { |
| 37 | int err; | ||
| 38 | struct buffer_head *bh, *head; | 37 | struct buffer_head *bh, *head; |
| 39 | int nr_underway = 0; | 38 | int nr_underway = 0; |
| 40 | int write_op = (1 << BIO_RW_META) | ((wbc->sync_mode == WB_SYNC_ALL ? | 39 | int write_op = (1 << BIO_RW_META) | ((wbc->sync_mode == WB_SYNC_ALL ? |
@@ -86,11 +85,10 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
| 86 | } while (bh != head); | 85 | } while (bh != head); |
| 87 | unlock_page(page); | 86 | unlock_page(page); |
| 88 | 87 | ||
| 89 | err = 0; | ||
| 90 | if (nr_underway == 0) | 88 | if (nr_underway == 0) |
| 91 | end_page_writeback(page); | 89 | end_page_writeback(page); |
| 92 | 90 | ||
| 93 | return err; | 91 | return 0; |
| 94 | } | 92 | } |
| 95 | 93 | ||
| 96 | const struct address_space_operations gfs2_meta_aops = { | 94 | const struct address_space_operations gfs2_meta_aops = { |
@@ -313,6 +311,7 @@ void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int
| 313 | struct gfs2_bufdata *bd = bh->b_private; | 311 | struct gfs2_bufdata *bd = bh->b_private; |
| 314 | 312 | ||
| 315 | if (test_clear_buffer_pinned(bh)) { | 313 | if (test_clear_buffer_pinned(bh)) { |
| 314 | atomic_dec(&sdp->sd_log_pinned); | ||
| 316 | list_del_init(&bd->bd_le.le_list); | 315 | list_del_init(&bd->bd_le.le_list); |
| 317 | if (meta) { | 316 | if (meta) { |
| 318 | gfs2_assert_warn(sdp, sdp->sd_log_num_buf); | 317 | gfs2_assert_warn(sdp, sdp->sd_log_num_buf); |
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index c1309ed1c496..3593b3a7290e 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -57,8 +57,6 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
| 57 | { | 57 | { |
| 58 | spin_lock_init(>->gt_spin); | 58 | spin_lock_init(>->gt_spin); |
| 59 | 59 | ||
| 60 | gt->gt_incore_log_blocks = 1024; | ||
| 61 | gt->gt_logd_secs = 1; | ||
| 62 | gt->gt_quota_simul_sync = 64; | 60 | gt->gt_quota_simul_sync = 64; |
| 63 | gt->gt_quota_warn_period = 10; | 61 | gt->gt_quota_warn_period = 10; |
| 64 | gt->gt_quota_scale_num = 1; | 62 | gt->gt_quota_scale_num = 1; |
@@ -101,14 +99,15 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
| 101 | spin_lock_init(&sdp->sd_trunc_lock); | 99 | spin_lock_init(&sdp->sd_trunc_lock); |
| 102 | 100 | ||
| 103 | spin_lock_init(&sdp->sd_log_lock); | 101 | spin_lock_init(&sdp->sd_log_lock); |
| 104 | 102 | atomic_set(&sdp->sd_log_pinned, 0); | |
| 105 | INIT_LIST_HEAD(&sdp->sd_log_le_buf); | 103 | INIT_LIST_HEAD(&sdp->sd_log_le_buf); |
| 106 | INIT_LIST_HEAD(&sdp->sd_log_le_revoke); | 104 | INIT_LIST_HEAD(&sdp->sd_log_le_revoke); |
| 107 | INIT_LIST_HEAD(&sdp->sd_log_le_rg); | 105 | INIT_LIST_HEAD(&sdp->sd_log_le_rg); |
| 108 | INIT_LIST_HEAD(&sdp->sd_log_le_databuf); | 106 | INIT_LIST_HEAD(&sdp->sd_log_le_databuf); |
| 109 | INIT_LIST_HEAD(&sdp->sd_log_le_ordered); | 107 | INIT_LIST_HEAD(&sdp->sd_log_le_ordered); |
| 110 | 108 | ||
| 111 | mutex_init(&sdp->sd_log_reserve_mutex); | 109 | init_waitqueue_head(&sdp->sd_log_waitq); |
| 110 | init_waitqueue_head(&sdp->sd_logd_waitq); | ||
| 112 | INIT_LIST_HEAD(&sdp->sd_ail1_list); | 111 | INIT_LIST_HEAD(&sdp->sd_ail1_list); |
| 113 | INIT_LIST_HEAD(&sdp->sd_ail2_list); | 112 | INIT_LIST_HEAD(&sdp->sd_ail2_list); |
| 114 | 113 | ||
@@ -487,7 +486,7 @@ static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr,
| 487 | struct dentry *dentry; | 486 | struct dentry *dentry; |
| 488 | struct inode *inode; | 487 | struct inode *inode; |
| 489 | 488 | ||
| 490 | inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0, 0); | 489 | inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0); |
| 491 | if (IS_ERR(inode)) { | 490 | if (IS_ERR(inode)) { |
| 492 | fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode)); | 491 | fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode)); |
| 493 | return PTR_ERR(inode); | 492 | return PTR_ERR(inode); |
@@ -733,6 +732,8 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
| 733 | if (sdp->sd_args.ar_spectator) { | 732 | if (sdp->sd_args.ar_spectator) { |
| 734 | sdp->sd_jdesc = gfs2_jdesc_find(sdp, 0); | 733 | sdp->sd_jdesc = gfs2_jdesc_find(sdp, 0); |
| 735 | atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks); | 734 | atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks); |
| 735 | atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5); | ||
| 736 | atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5); | ||
| 736 | } else { | 737 | } else { |
| 737 | if (sdp->sd_lockstruct.ls_jid >= gfs2_jindex_size(sdp)) { | 738 | if (sdp->sd_lockstruct.ls_jid >= gfs2_jindex_size(sdp)) { |
| 738 | fs_err(sdp, "can't mount journal #%u\n", | 739 | fs_err(sdp, "can't mount journal #%u\n", |
@@ -770,6 +771,8 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
| 770 | goto fail_jinode_gh; | 771 | goto fail_jinode_gh; |
| 771 | } | 772 | } |
| 772 | atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks); | 773 | atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks); |
| 774 | atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5); | ||
| 775 | atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5); | ||
| 773 | 776 | ||
| 774 | /* Map the extents for this journal's blocks */ | 777 | /* Map the extents for this journal's blocks */ |
| 775 | map_journal_extents(sdp); | 778 | map_journal_extents(sdp); |
@@ -951,8 +954,6 @@ static int init_threads(struct gfs2_sbd *sdp, int undo)
| 951 | if (undo) | 954 | if (undo) |
| 952 | goto fail_quotad; | 955 | goto fail_quotad; |
| 953 | 956 | ||
| 954 | sdp->sd_log_flush_time = jiffies; | ||
| 955 | |||
| 956 | p = kthread_run(gfs2_logd, sdp, "gfs2_logd"); | 957 | p = kthread_run(gfs2_logd, sdp, "gfs2_logd"); |
| 957 | error = IS_ERR(p); | 958 | error = IS_ERR(p); |
| 958 | if (error) { | 959 | if (error) { |
@@ -1160,7 +1161,7 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent
| 1160 | GFS2_BASIC_BLOCK_SHIFT; | 1161 | GFS2_BASIC_BLOCK_SHIFT; |
| 1161 | sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift; | 1162 | sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift; |
| 1162 | 1163 | ||
| 1163 | sdp->sd_tune.gt_log_flush_secs = sdp->sd_args.ar_commit; | 1164 | sdp->sd_tune.gt_logd_secs = sdp->sd_args.ar_commit; |
| 1164 | sdp->sd_tune.gt_quota_quantum = sdp->sd_args.ar_quota_quantum; | 1165 | sdp->sd_tune.gt_quota_quantum = sdp->sd_args.ar_quota_quantum; |
| 1165 | if (sdp->sd_args.ar_statfs_quantum) { | 1166 | if (sdp->sd_args.ar_statfs_quantum) { |
| 1166 | sdp->sd_tune.gt_statfs_slow = 0; | 1167 | sdp->sd_tune.gt_statfs_slow = 0; |
@@ -1323,7 +1324,7 @@ static int gfs2_get_sb(struct file_system_type *fs_type, int flags,
| 1323 | memset(&args, 0, sizeof(args)); | 1324 | memset(&args, 0, sizeof(args)); |
| 1324 | args.ar_quota = GFS2_QUOTA_DEFAULT; | 1325 | args.ar_quota = GFS2_QUOTA_DEFAULT; |
| 1325 | args.ar_data = GFS2_DATA_DEFAULT; | 1326 | args.ar_data = GFS2_DATA_DEFAULT; |
| 1326 | args.ar_commit = 60; | 1327 | args.ar_commit = 30; |
| 1327 | args.ar_statfs_quantum = 30; | 1328 | args.ar_statfs_quantum = 30; |
| 1328 | args.ar_quota_quantum = 60; | 1329 | args.ar_quota_quantum = 60; |
| 1329 | args.ar_errors = GFS2_ERRORS_DEFAULT; | 1330 | args.ar_errors = GFS2_ERRORS_DEFAULT; |
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 6dbcbad6ab17..d5f4661287f9 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -637,15 +637,40 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
| 637 | unsigned blocksize, iblock, pos; | 637 | unsigned blocksize, iblock, pos; |
| 638 | struct buffer_head *bh, *dibh; | 638 | struct buffer_head *bh, *dibh; |
| 639 | struct page *page; | 639 | struct page *page; |
| 640 | void *kaddr; | 640 | void *kaddr, *ptr; |
| 641 | struct gfs2_quota *qp; | 641 | struct gfs2_quota q, *qp; |
| 642 | s64 value; | 642 | int err, nbytes; |
| 643 | int err = -EIO; | ||
| 644 | u64 size; | 643 | u64 size; |
| 645 | 644 | ||
| 646 | if (gfs2_is_stuffed(ip)) | 645 | if (gfs2_is_stuffed(ip)) |
| 647 | gfs2_unstuff_dinode(ip, NULL); | 646 | gfs2_unstuff_dinode(ip, NULL); |
| 648 | 647 | ||
| 648 | memset(&q, 0, sizeof(struct gfs2_quota)); | ||
| 649 | err = gfs2_internal_read(ip, NULL, (char *)&q, &loc, sizeof(q)); | ||
| 650 | if (err < 0) | ||
| 651 | return err; | ||
| 652 | |||
| 653 | err = -EIO; | ||
| 654 | qp = &q; | ||
| 655 | qp->qu_value = be64_to_cpu(qp->qu_value); | ||
| 656 | qp->qu_value += change; | ||
| 657 | qp->qu_value = cpu_to_be64(qp->qu_value); | ||
| 658 | qd->qd_qb.qb_value = qp->qu_value; | ||
| 659 | if (fdq) { | ||
| 660 | if (fdq->d_fieldmask & FS_DQ_BSOFT) { | ||
| 661 | qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit); | ||
| 662 | qd->qd_qb.qb_warn = qp->qu_warn; | ||
| 663 | } | ||
| 664 | if (fdq->d_fieldmask & FS_DQ_BHARD) { | ||
| 665 | qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit); | ||
| 666 | qd->qd_qb.qb_limit = qp->qu_limit; | ||
| 667 | } | ||
| 668 | } | ||
| 669 | |||
| 670 | /* Write the quota into the quota file on disk */ | ||
| 671 | ptr = qp; | ||
| 672 | nbytes = sizeof(struct gfs2_quota); | ||
| 673 | get_a_page: | ||
| 649 | page = grab_cache_page(mapping, index); | 674 | page = grab_cache_page(mapping, index); |
| 650 | if (!page) | 675 | if (!page) |
| 651 | return -ENOMEM; | 676 | return -ENOMEM; |
@@ -667,7 +692,12 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
| 667 | if (!buffer_mapped(bh)) { | 692 | if (!buffer_mapped(bh)) { |
| 668 | gfs2_block_map(inode, iblock, bh, 1); | 693 | gfs2_block_map(inode, iblock, bh, 1); |
| 669 | if (!buffer_mapped(bh)) | 694 | if (!buffer_mapped(bh)) |
| 670 | goto unlock; | 695 | goto unlock_out; |
| 696 | /* If it's a newly allocated disk block for quota, zero it */ | ||
| 697 | if (buffer_new(bh)) { | ||
| 698 | memset(bh->b_data, 0, bh->b_size); | ||
| 699 | set_buffer_uptodate(bh); | ||
| 700 | } | ||
| 671 | } | 701 | } |
| 672 | 702 | ||
| 673 | if (PageUptodate(page)) | 703 | if (PageUptodate(page)) |
@@ -677,32 +707,34 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
| 677 | ll_rw_block(READ_META, 1, &bh); | 707 | ll_rw_block(READ_META, 1, &bh); |
| 678 | wait_on_buffer(bh); | 708 | wait_on_buffer(bh); |
| 679 | if (!buffer_uptodate(bh)) | 709 | if (!buffer_uptodate(bh)) |
| 680 | goto unlock; | 710 | goto unlock_out; |
| 681 | } | 711 | } |
| 682 | 712 | ||
| 683 | gfs2_trans_add_bh(ip->i_gl, bh, 0); | 713 | gfs2_trans_add_bh(ip->i_gl, bh, 0); |
| 684 | 714 | ||
| 685 | kaddr = kmap_atomic(page, KM_USER0); | 715 | kaddr = kmap_atomic(page, KM_USER0); |
| 686 | qp = kaddr + offset; | 716 | if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE) |
| 687 | value = (s64)be64_to_cpu(qp->qu_value) + change; | 717 | nbytes = PAGE_CACHE_SIZE - offset; |
| 688 | qp->qu_value = cpu_to_be64(value); | 718 | memcpy(kaddr + offset, ptr, nbytes); |
| 689 | qd->qd_qb.qb_value = qp->qu_value; | ||
| 690 | if (fdq) { | ||
| 691 | if (fdq->d_fieldmask & FS_DQ_BSOFT) { | ||
| 692 | qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit); | ||
| 693 | qd->qd_qb.qb_warn = qp->qu_warn; | ||
| 694 | } | ||
| 695 | if (fdq->d_fieldmask & FS_DQ_BHARD) { | ||
| 696 | qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit); | ||
| 697 | qd->qd_qb.qb_limit = qp->qu_limit; | ||
| 698 | } | ||
| 699 | } | ||
| 700 | flush_dcache_page(page); | 719 | flush_dcache_page(page); |
| 701 | kunmap_atomic(kaddr, KM_USER0); | 720 | kunmap_atomic(kaddr, KM_USER0); |
| 721 | unlock_page(page); | ||
| 722 | page_cache_release(page); | ||
| 702 | 723 | ||
| 724 | /* If quota straddles page boundary, we need to update the rest of the | ||
| 725 | * quota at the beginning of the next page */ | ||
| 726 | if (offset != 0) { /* first page, offset is closer to PAGE_CACHE_SIZE */ | ||
| 727 | ptr = ptr + nbytes; | ||
| 728 | nbytes = sizeof(struct gfs2_quota) - nbytes; | ||
| 729 | offset = 0; | ||
| 730 | index++; | ||
| 731 | goto get_a_page; | ||
| 732 | } | ||
| 733 | |||
| 734 | /* Update the disk inode timestamp and size (if extended) */ | ||
| 703 | err = gfs2_meta_inode_buffer(ip, &dibh); | 735 | err = gfs2_meta_inode_buffer(ip, &dibh); |
| 704 | if (err) | 736 | if (err) |
| 705 | goto unlock; | 737 | goto out; |
| 706 | 738 | ||
| 707 | size = loc + sizeof(struct gfs2_quota); | 739 | size = loc + sizeof(struct gfs2_quota); |
| 708 | if (size > inode->i_size) { | 740 | if (size > inode->i_size) { |
@@ -715,7 +747,9 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
| 715 | brelse(dibh); | 747 | brelse(dibh); |
| 716 | mark_inode_dirty(inode); | 748 | mark_inode_dirty(inode); |
| 717 | 749 | ||
| 718 | unlock: | 750 | out: |
| 751 | return err; | ||
| 752 | unlock_out: | ||
| 719 | unlock_page(page); | 753 | unlock_page(page); |
| 720 | page_cache_release(page); | 754 | page_cache_release(page); |
| 721 | return err; | 755 | return err; |
@@ -779,8 +813,10 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
| 779 | * rgrp since it won't be allocated during the transaction | 813 | * rgrp since it won't be allocated during the transaction |
| 780 | */ | 814 | */ |
| 781 | al->al_requested = 1; | 815 | al->al_requested = 1; |
| 782 | /* +1 in the end for block requested above for unstuffing */ | 816 | /* +3 in the end for unstuffing block, inode size update block |
| 783 | blocks = num_qd * data_blocks + RES_DINODE + num_qd + 1; | 817 | * and another block in case quota straddles page boundary and |
| 818 | * two blocks need to be updated instead of 1 */ | ||
| 819 | blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3; | ||
| 784 | 820 | ||
| 785 | if (nalloc) | 821 | if (nalloc) |
| 786 | al->al_requested += nalloc * (data_blocks + ind_blocks); | 822 | al->al_requested += nalloc * (data_blocks + ind_blocks); |
@@ -1418,10 +1454,18 @@ static int gfs2_quota_get_xstate(struct super_block *sb,
| 1418 | 1454 | ||
| 1419 | memset(fqs, 0, sizeof(struct fs_quota_stat)); | 1455 | memset(fqs, 0, sizeof(struct fs_quota_stat)); |
| 1420 | fqs->qs_version = FS_QSTAT_VERSION; | 1456 | fqs->qs_version = FS_QSTAT_VERSION; |
| 1421 | if (sdp->sd_args.ar_quota == GFS2_QUOTA_ON) | 1457 | |
| 1422 | fqs->qs_flags = (XFS_QUOTA_UDQ_ENFD | XFS_QUOTA_GDQ_ENFD); | 1458 | switch (sdp->sd_args.ar_quota) { |
| 1423 | else if (sdp->sd_args.ar_quota == GFS2_QUOTA_ACCOUNT) | 1459 | case GFS2_QUOTA_ON: |
| 1424 | fqs->qs_flags = (XFS_QUOTA_UDQ_ACCT | XFS_QUOTA_GDQ_ACCT); | 1460 | fqs->qs_flags |= (XFS_QUOTA_UDQ_ENFD | XFS_QUOTA_GDQ_ENFD); |
| 1461 | /*FALLTHRU*/ | ||
| 1462 | case GFS2_QUOTA_ACCOUNT: | ||
| 1463 | fqs->qs_flags |= (XFS_QUOTA_UDQ_ACCT | XFS_QUOTA_GDQ_ACCT); | ||
| 1464 | break; | ||
| 1465 | case GFS2_QUOTA_OFF: | ||
| 1466 | break; | ||
| 1467 | } | ||
| 1468 | |||
| 1425 | if (sdp->sd_quota_inode) { | 1469 | if (sdp->sd_quota_inode) { |
| 1426 | fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr; | 1470 | fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr; |
| 1427 | fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks; | 1471 | fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks; |
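The gfs2_adjust_quota() changes above build the updated quota record in a local struct gfs2_quota and then write it through the page cache in up to two pieces, because the record may now straddle a page boundary in the quota file (hence the "+3" block reservation in do_sync()). A simplified userspace sketch of that split copy is below; plain byte arrays stand in for page-cache pages and the sizes are illustrative.

```c
/*
 * Illustrative only: copy a record into fixed-size "pages", splitting the
 * write when the record crosses a page boundary, as the get_a_page loop
 * in gfs2_adjust_quota() now does.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

static void write_split(uint8_t pages[][PAGE_SIZE], size_t index, size_t offset,
                        const void *buf, size_t len)
{
    const uint8_t *ptr = buf;
    size_t nbytes = len;

    for (;;) {
        if (offset + nbytes > PAGE_SIZE)
            nbytes = PAGE_SIZE - offset;   /* only what fits in this page */
        memcpy(&pages[index][offset], ptr, nbytes);
        if (nbytes == len)                 /* everything written */
            break;
        ptr += nbytes;                     /* remainder goes to the next page */
        len -= nbytes;
        nbytes = len;
        offset = 0;
        index++;
    }
}

int main(void)
{
    static uint8_t pages[2][PAGE_SIZE];
    uint8_t quota[88];                     /* stand-in for struct gfs2_quota */

    memset(quota, 0xab, sizeof(quota));
    /* Start 40 bytes before the end of page 0, so 48 bytes spill into page 1. */
    write_split(pages, 0, PAGE_SIZE - 40, quota, sizeof(quota));
    printf("last spilled byte: 0x%x\n", pages[1][47]);   /* 0xab */
    return 0;
}
```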
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 503b842f3ba2..8bce73ed4d8e 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -948,13 +948,13 @@ static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al)
| 948 | * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes | 948 | * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes |
| 949 | * @rgd: The rgrp | 949 | * @rgd: The rgrp |
| 950 | * | 950 | * |
| 951 | * Returns: The inode, if one has been found | 951 | * Returns: 0 if no error |
| 952 | * The inode, if one has been found, in inode. | ||
| 952 | */ | 953 | */ |
| 953 | 954 | ||
| 954 | static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, | 955 | static u64 try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, |
| 955 | u64 skip) | 956 | u64 skip) |
| 956 | { | 957 | { |
| 957 | struct inode *inode; | ||
| 958 | u32 goal = 0, block; | 958 | u32 goal = 0, block; |
| 959 | u64 no_addr; | 959 | u64 no_addr; |
| 960 | struct gfs2_sbd *sdp = rgd->rd_sbd; | 960 | struct gfs2_sbd *sdp = rgd->rd_sbd; |
@@ -979,14 +979,11 @@ static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked,
| 979 | if (no_addr == skip) | 979 | if (no_addr == skip) |
| 980 | continue; | 980 | continue; |
| 981 | *last_unlinked = no_addr; | 981 | *last_unlinked = no_addr; |
| 982 | inode = gfs2_inode_lookup(rgd->rd_sbd->sd_vfs, DT_UNKNOWN, | 982 | return no_addr; |
| 983 | no_addr, -1, 1); | ||
| 984 | if (!IS_ERR(inode)) | ||
| 985 | return inode; | ||
| 986 | } | 983 | } |
| 987 | 984 | ||
| 988 | rgd->rd_flags &= ~GFS2_RDF_CHECK; | 985 | rgd->rd_flags &= ~GFS2_RDF_CHECK; |
| 989 | return NULL; | 986 | return 0; |
| 990 | } | 987 | } |
| 991 | 988 | ||
| 992 | /** | 989 | /** |
@@ -1067,11 +1064,12 @@ static void forward_rgrp_set(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd)
| 1067 | * Try to acquire rgrp in way which avoids contending with others. | 1064 | * Try to acquire rgrp in way which avoids contending with others. |
| 1068 | * | 1065 | * |
| 1069 | * Returns: errno | 1066 | * Returns: errno |
| 1067 | * unlinked: the block address of an unlinked block to be reclaimed | ||
| 1070 | */ | 1068 | */ |
| 1071 | 1069 | ||
| 1072 | static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked) | 1070 | static int get_local_rgrp(struct gfs2_inode *ip, u64 *unlinked, |
| 1071 | u64 *last_unlinked) | ||
| 1073 | { | 1072 | { |
| 1074 | struct inode *inode = NULL; | ||
| 1075 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 1073 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
| 1076 | struct gfs2_rgrpd *rgd, *begin = NULL; | 1074 | struct gfs2_rgrpd *rgd, *begin = NULL; |
| 1077 | struct gfs2_alloc *al = ip->i_alloc; | 1075 | struct gfs2_alloc *al = ip->i_alloc; |
@@ -1080,6 +1078,7 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
| 1080 | int loops = 0; | 1078 | int loops = 0; |
| 1081 | int error, rg_locked; | 1079 | int error, rg_locked; |
| 1082 | 1080 | ||
| 1081 | *unlinked = 0; | ||
| 1083 | rgd = gfs2_blk2rgrpd(sdp, ip->i_goal); | 1082 | rgd = gfs2_blk2rgrpd(sdp, ip->i_goal); |
| 1084 | 1083 | ||
| 1085 | while (rgd) { | 1084 | while (rgd) { |
@@ -1096,19 +1095,24 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
| 1096 | case 0: | 1095 | case 0: |
| 1097 | if (try_rgrp_fit(rgd, al)) | 1096 | if (try_rgrp_fit(rgd, al)) |
| 1098 | goto out; | 1097 | goto out; |
| 1099 | if (rgd->rd_flags & GFS2_RDF_CHECK) | 1098 | /* If the rg came in already locked, there's no |
| 1100 | inode = try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr); | 1099 | way we can recover from a failed try_rgrp_unlink |
| 1100 | because that would require an iput which can only | ||
| 1101 | happen after the rgrp is unlocked. */ | ||
| 1102 | if (!rg_locked && rgd->rd_flags & GFS2_RDF_CHECK) | ||
| 1103 | *unlinked = try_rgrp_unlink(rgd, last_unlinked, | ||
| 1104 | ip->i_no_addr); | ||
| 1101 | if (!rg_locked) | 1105 | if (!rg_locked) |
| 1102 | gfs2_glock_dq_uninit(&al->al_rgd_gh); | 1106 | gfs2_glock_dq_uninit(&al->al_rgd_gh); |
| 1103 | if (inode) | 1107 | if (*unlinked) |
| 1104 | return inode; | 1108 | return -EAGAIN; |
| 1105 | /* fall through */ | 1109 | /* fall through */ |
| 1106 | case GLR_TRYFAILED: | 1110 | case GLR_TRYFAILED: |
| 1107 | rgd = recent_rgrp_next(rgd); | 1111 | rgd = recent_rgrp_next(rgd); |
| 1108 | break; | 1112 | break; |
| 1109 | 1113 | ||
| 1110 | default: | 1114 | default: |
| 1111 | return ERR_PTR(error); | 1115 | return error; |
| 1112 | } | 1116 | } |
| 1113 | } | 1117 | } |
| 1114 | 1118 | ||
@@ -1130,12 +1134,13 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
| 1130 | case 0: | 1134 | case 0: |
| 1131 | if (try_rgrp_fit(rgd, al)) | 1135 | if (try_rgrp_fit(rgd, al)) |
| 1132 | goto out; | 1136 | goto out; |
| 1133 | if (rgd->rd_flags & GFS2_RDF_CHECK) | 1137 | if (!rg_locked && rgd->rd_flags & GFS2_RDF_CHECK) |
| 1134 | inode = try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr); | 1138 | *unlinked = try_rgrp_unlink(rgd, last_unlinked, |
| 1139 | ip->i_no_addr); | ||
| 1135 | if (!rg_locked) | 1140 | if (!rg_locked) |
| 1136 | gfs2_glock_dq_uninit(&al->al_rgd_gh); | 1141 | gfs2_glock_dq_uninit(&al->al_rgd_gh); |
| 1137 | if (inode) | 1142 | if (*unlinked) |
| 1138 | return inode; | 1143 | return -EAGAIN; |
| 1139 | break; | 1144 | break; |
| 1140 | 1145 | ||
| 1141 | case GLR_TRYFAILED: | 1146 | case GLR_TRYFAILED: |
@@ -1143,7 +1148,7 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
| 1143 | break; | 1148 | break; |
| 1144 | 1149 | ||
| 1145 | default: | 1150 | default: |
| 1146 | return ERR_PTR(error); | 1151 | return error; |
| 1147 | } | 1152 | } |
| 1148 | 1153 | ||
| 1149 | rgd = gfs2_rgrpd_get_next(rgd); | 1154 | rgd = gfs2_rgrpd_get_next(rgd); |
@@ -1152,7 +1157,7 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
| 1152 | 1157 | ||
| 1153 | if (rgd == begin) { | 1158 | if (rgd == begin) { |
| 1154 | if (++loops >= 3) | 1159 | if (++loops >= 3) |
| 1155 | return ERR_PTR(-ENOSPC); | 1160 | return -ENOSPC; |
| 1156 | if (!skipped) | 1161 | if (!skipped) |
| 1157 | loops++; | 1162 | loops++; |
| 1158 | flags = 0; | 1163 | flags = 0; |
@@ -1172,7 +1177,7 @@ out:
| 1172 | forward_rgrp_set(sdp, rgd); | 1177 | forward_rgrp_set(sdp, rgd); |
| 1173 | } | 1178 | } |
| 1174 | 1179 | ||
| 1175 | return NULL; | 1180 | return 0; |
| 1176 | } | 1181 | } |
| 1177 | 1182 | ||
| 1178 | /** | 1183 | /** |
@@ -1188,7 +1193,7 @@ int gfs2_inplace_reserve_i(struct gfs2_inode *ip, char *file, unsigned int line)
| 1188 | struct gfs2_alloc *al = ip->i_alloc; | 1193 | struct gfs2_alloc *al = ip->i_alloc; |
| 1189 | struct inode *inode; | 1194 | struct inode *inode; |
| 1190 | int error = 0; | 1195 | int error = 0; |
| 1191 | u64 last_unlinked = NO_BLOCK; | 1196 | u64 last_unlinked = NO_BLOCK, unlinked; |
| 1192 | 1197 | ||
| 1193 | if (gfs2_assert_warn(sdp, al->al_requested)) | 1198 | if (gfs2_assert_warn(sdp, al->al_requested)) |
| 1194 | return -EINVAL; | 1199 | return -EINVAL; |
@@ -1204,14 +1209,19 @@ try_again:
| 1204 | if (error) | 1209 | if (error) |
| 1205 | return error; | 1210 | return error; |
| 1206 | 1211 | ||
| 1207 | inode = get_local_rgrp(ip, &last_unlinked); | 1212 | error = get_local_rgrp(ip, &unlinked, &last_unlinked); |
| 1208 | if (inode) { | 1213 | if (error) { |
| 1209 | if (ip != GFS2_I(sdp->sd_rindex)) | 1214 | if (ip != GFS2_I(sdp->sd_rindex)) |
| 1210 | gfs2_glock_dq_uninit(&al->al_ri_gh); | 1215 | gfs2_glock_dq_uninit(&al->al_ri_gh); |
| 1211 | if (IS_ERR(inode)) | 1216 | if (error != -EAGAIN) |
| 1212 | return PTR_ERR(inode); | 1217 | return error; |
| 1213 | iput(inode); | 1218 | error = gfs2_unlinked_inode_lookup(ip->i_inode.i_sb, |
| 1219 | unlinked, &inode); | ||
| 1220 | if (inode) | ||
| 1221 | iput(inode); | ||
| 1214 | gfs2_log_flush(sdp, NULL); | 1222 | gfs2_log_flush(sdp, NULL); |
| 1223 | if (error == GLR_TRYFAILED) | ||
| 1224 | error = 0; | ||
| 1215 | goto try_again; | 1225 | goto try_again; |
| 1216 | } | 1226 | } |
| 1217 | 1227 | ||
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 50aac606b990..4d1aad38f1b1 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1113,7 +1113,7 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
| 1113 | int error; | 1113 | int error; |
| 1114 | 1114 | ||
| 1115 | spin_lock(>->gt_spin); | 1115 | spin_lock(>->gt_spin); |
| 1116 | args.ar_commit = gt->gt_log_flush_secs; | 1116 | args.ar_commit = gt->gt_logd_secs; |
| 1117 | args.ar_quota_quantum = gt->gt_quota_quantum; | 1117 | args.ar_quota_quantum = gt->gt_quota_quantum; |
| 1118 | if (gt->gt_statfs_slow) | 1118 | if (gt->gt_statfs_slow) |
| 1119 | args.ar_statfs_quantum = 0; | 1119 | args.ar_statfs_quantum = 0; |
@@ -1160,7 +1160,7 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
| 1160 | else | 1160 | else |
| 1161 | clear_bit(SDF_NOBARRIERS, &sdp->sd_flags); | 1161 | clear_bit(SDF_NOBARRIERS, &sdp->sd_flags); |
| 1162 | spin_lock(>->gt_spin); | 1162 | spin_lock(>->gt_spin); |
| 1163 | gt->gt_log_flush_secs = args.ar_commit; | 1163 | gt->gt_logd_secs = args.ar_commit; |
| 1164 | gt->gt_quota_quantum = args.ar_quota_quantum; | 1164 | gt->gt_quota_quantum = args.ar_quota_quantum; |
| 1165 | if (args.ar_statfs_quantum) { | 1165 | if (args.ar_statfs_quantum) { |
| 1166 | gt->gt_statfs_slow = 0; | 1166 | gt->gt_statfs_slow = 0; |
@@ -1305,8 +1305,8 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
| 1305 | } | 1305 | } |
| 1306 | if (args->ar_discard) | 1306 | if (args->ar_discard) |
| 1307 | seq_printf(s, ",discard"); | 1307 | seq_printf(s, ",discard"); |
| 1308 | val = sdp->sd_tune.gt_log_flush_secs; | 1308 | val = sdp->sd_tune.gt_logd_secs; |
| 1309 | if (val != 60) | 1309 | if (val != 30) |
| 1310 | seq_printf(s, ",commit=%d", val); | 1310 | seq_printf(s, ",commit=%d", val); |
| 1311 | val = sdp->sd_tune.gt_statfs_quantum; | 1311 | val = sdp->sd_tune.gt_statfs_quantum; |
| 1312 | if (val != 30) | 1312 | if (val != 30) |
@@ -1334,7 +1334,8 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
| 1334 | } | 1334 | } |
| 1335 | if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) | 1335 | if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) |
| 1336 | seq_printf(s, ",nobarrier"); | 1336 | seq_printf(s, ",nobarrier"); |
| 1337 | 1337 | if (test_bit(SDF_DEMOTE, &sdp->sd_flags)) | |
| 1338 | seq_printf(s, ",demote_interface_used"); | ||
| 1338 | return 0; | 1339 | return 0; |
| 1339 | } | 1340 | } |
| 1340 | 1341 | ||
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 54fd98425991..37f5393e68e6 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -232,6 +232,8 @@ static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len
| 232 | glops = gfs2_glops_list[gltype]; | 232 | glops = gfs2_glops_list[gltype]; |
| 233 | if (glops == NULL) | 233 | if (glops == NULL) |
| 234 | return -EINVAL; | 234 | return -EINVAL; |
| 235 | if (!test_and_set_bit(SDF_DEMOTE, &sdp->sd_flags)) | ||
| 236 | fs_info(sdp, "demote interface used\n"); | ||
| 235 | rv = gfs2_glock_get(sdp, glnum, glops, 0, &gl); | 237 | rv = gfs2_glock_get(sdp, glnum, glops, 0, &gl); |
| 236 | if (rv) | 238 | if (rv) |
| 237 | return rv; | 239 | return rv; |
@@ -468,8 +470,6 @@ static ssize_t name##_store(struct gfs2_sbd *sdp, const char *buf, size_t len)\
| 468 | } \ | 470 | } \ |
| 469 | TUNE_ATTR_2(name, name##_store) | 471 | TUNE_ATTR_2(name, name##_store) |
| 470 | 472 | ||
| 471 | TUNE_ATTR(incore_log_blocks, 0); | ||
| 472 | TUNE_ATTR(log_flush_secs, 0); | ||
| 473 | TUNE_ATTR(quota_warn_period, 0); | 473 | TUNE_ATTR(quota_warn_period, 0); |
| 474 | TUNE_ATTR(quota_quantum, 0); | 474 | TUNE_ATTR(quota_quantum, 0); |
| 475 | TUNE_ATTR(max_readahead, 0); | 475 | TUNE_ATTR(max_readahead, 0); |
@@ -481,8 +481,6 @@ TUNE_ATTR(statfs_quantum, 1);
| 481 | TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store); | 481 | TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store); |
| 482 | 482 | ||
| 483 | static struct attribute *tune_attrs[] = { | 483 | static struct attribute *tune_attrs[] = { |
| 484 | &tune_attr_incore_log_blocks.attr, | ||
| 485 | &tune_attr_log_flush_secs.attr, | ||
| 486 | &tune_attr_quota_warn_period.attr, | 484 | &tune_attr_quota_warn_period.attr, |
| 487 | &tune_attr_quota_quantum.attr, | 485 | &tune_attr_quota_quantum.attr, |
| 488 | &tune_attr_max_readahead.attr, | 486 | &tune_attr_max_readahead.attr, |
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index 4ef0e9fa3549..9ec73a854111 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -23,6 +23,7 @@
| 23 | #include "meta_io.h" | 23 | #include "meta_io.h" |
| 24 | #include "trans.h" | 24 | #include "trans.h" |
| 25 | #include "util.h" | 25 | #include "util.h" |
| 26 | #include "trace_gfs2.h" | ||
| 26 | 27 | ||
| 27 | int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks, | 28 | int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks, |
| 28 | unsigned int revokes) | 29 | unsigned int revokes) |
@@ -75,6 +76,23 @@ fail_holder_uninit:
| 75 | return error; | 76 | return error; |
| 76 | } | 77 | } |
| 77 | 78 | ||
| 79 | /** | ||
| 80 | * gfs2_log_release - Release a given number of log blocks | ||
| 81 | * @sdp: The GFS2 superblock | ||
| 82 | * @blks: The number of blocks | ||
| 83 | * | ||
| 84 | */ | ||
| 85 | |||
| 86 | static void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks) | ||
| 87 | { | ||
| 88 | |||
| 89 | atomic_add(blks, &sdp->sd_log_blks_free); | ||
| 90 | trace_gfs2_log_blocks(sdp, blks); | ||
| 91 | gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <= | ||
| 92 | sdp->sd_jdesc->jd_blocks); | ||
| 93 | up_read(&sdp->sd_log_flush_lock); | ||
| 94 | } | ||
| 95 | |||
| 78 | void gfs2_trans_end(struct gfs2_sbd *sdp) | 96 | void gfs2_trans_end(struct gfs2_sbd *sdp) |
| 79 | { | 97 | { |
| 80 | struct gfs2_trans *tr = current->journal_info; | 98 | struct gfs2_trans *tr = current->journal_info; |
