Diffstat (limited to 'fs/jfs')
 fs/jfs/jfs_dmap.c     |  4 ++--
 fs/jfs/jfs_extent.c   |  6 +++---
 fs/jfs/jfs_imap.c     | 14 +++++++-------
 fs/jfs/jfs_logmgr.h   |  2 +-
 fs/jfs/jfs_metapage.h |  2 +-
 fs/jfs/jfs_txnmgr.c   |  2 +-
 fs/jfs/resize.c       |  4 ++--
 fs/jfs/super.c        |  2 +-
 8 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index c92ea3b3ea5e..4496872cf4e7 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -1649,7 +1649,7 @@ static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno)
 	}
 
 	/* search the tree within the dmap control page for
-	 * sufficent free space. if sufficient free space is found,
+	 * sufficient free space. if sufficient free space is found,
 	 * dbFindLeaf() returns the index of the leaf at which
 	 * free space was found.
 	 */
@@ -2744,7 +2744,7 @@ static int dbJoin(dmtree_t * tp, int leafno, int newval)
 	/* check which (leafno or buddy) is the left buddy.
 	 * the left buddy gets to claim the blocks resulting
 	 * from the join while the right gets to claim none.
-	 * the left buddy is also eligable to participate in
+	 * the left buddy is also eligible to participate in
 	 * a join at the next higher level while the right
 	 * is not.
 	 *
diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c
index 5d3bbd10f8db..e5fe8506ed16 100644
--- a/fs/jfs/jfs_extent.c
+++ b/fs/jfs/jfs_extent.c
@@ -126,7 +126,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
 
 	/* allocate the disk blocks for the extent. initially, extBalloc()
 	 * will try to allocate disk blocks for the requested size (xlen).
-	 * if this fails (xlen contiguous free blocks not avaliable), it'll
+	 * if this fails (xlen contiguous free blocks not available), it'll
 	 * try to allocate a smaller number of blocks (producing a smaller
 	 * extent), with this smaller number of blocks consisting of the
 	 * requested number of blocks rounded down to the next smaller
@@ -481,7 +481,7 @@ int extFill(struct inode *ip, xad_t * xp)
  *
  *		initially, we will try to allocate disk blocks for the
  *		requested size (nblocks). if this fails (nblocks
- *		contiguous free blocks not avaliable), we'll try to allocate
+ *		contiguous free blocks not available), we'll try to allocate
  *		a smaller number of blocks (producing a smaller extent), with
  *		this smaller number of blocks consisting of the requested
  *		number of blocks rounded down to the next smaller power of 2
@@ -575,7 +575,7 @@ extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
  *		to a new set of blocks. If moving the extent, we initially
  *		will try to allocate disk blocks for the requested size
  *		(newnblks). if this fails (new contiguous free blocks not
- *		avaliable), we'll try to allocate a smaller number of
+ *		available), we'll try to allocate a smaller number of
  *		blocks (producing a smaller extent), with this smaller
  *		number of blocks consisting of the requested number of
  *		blocks rounded down to the next smaller power of 2
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index 3a09423b6c22..ed53a4740168 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -1069,7 +1069,7 @@ int diFree(struct inode *ip)
 	 */
 	if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG - 1)) {
 		/* in preparation for removing the iag from the
-		 * ag extent free list, read the iags preceeding
+		 * ag extent free list, read the iags preceding
 		 * and following the iag on the ag extent free
 		 * list.
 		 */
@@ -1095,7 +1095,7 @@ int diFree(struct inode *ip)
 		int inofreefwd = le32_to_cpu(iagp->inofreefwd);
 
 		/* in preparation for removing the iag from the
-		 * ag inode free list, read the iags preceeding
+		 * ag inode free list, read the iags preceding
 		 * and following the iag on the ag inode free
 		 * list. before reading these iags, we must make
 		 * sure that we already don't have them in hand
@@ -1681,7 +1681,7 @@ diAllocAG(struct inomap * imap, int agno, bool dir, struct inode *ip)
 	 * try to allocate a new extent of free inodes.
 	 */
 	if (addext) {
-		/* if free space is not avaliable for this new extent, try
+		/* if free space is not available for this new extent, try
 		 * below to allocate a free and existing (already backed)
 		 * inode from the ag.
 		 */
@@ -2036,7 +2036,7 @@ static int diAllocBit(struct inomap * imap, struct iag * iagp, int ino)
 
 	/* check if this is the last free inode within the iag.
 	 * if so, it will have to be removed from the ag free
-	 * inode list, so get the iags preceeding and following
+	 * inode list, so get the iags preceding and following
 	 * it on the list.
 	 */
 	if (iagp->nfreeinos == cpu_to_le32(1)) {
@@ -2208,7 +2208,7 @@ static int diNewExt(struct inomap * imap, struct iag * iagp, int extno)
 
 	/* check if this is the last free extent within the
 	 * iag. if so, the iag must be removed from the ag
-	 * free extent list, so get the iags preceeding and
+	 * free extent list, so get the iags preceding and
 	 * following the iag on this list.
 	 */
 	if (iagp->nfreeexts == cpu_to_le32(1)) {
@@ -2504,7 +2504,7 @@ diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp)
 	}
 
 
-	/* get the next avaliable iag number */
+	/* get the next available iag number */
 	iagno = imap->im_nextiag;
 
 	/* make sure that we have not exceeded the maximum inode
@@ -2615,7 +2615,7 @@ diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp)
 
 	duplicateIXtree(sb, blkno, xlen, &xaddr);
 
-	/* update the next avaliable iag number */
+	/* update the next available iag number */
 	imap->im_nextiag += 1;
 
 	/* Add the iag to the iag free list so we don't lose the iag
diff --git a/fs/jfs/jfs_logmgr.h b/fs/jfs/jfs_logmgr.h
index 9236bc49ae7f..e38c21598850 100644
--- a/fs/jfs/jfs_logmgr.h
+++ b/fs/jfs/jfs_logmgr.h
@@ -288,7 +288,7 @@ struct lrd {
 		/*
 		 *	SYNCPT: log sync point
 		 *
-		 *	replay log upto syncpt address specified;
+		 *	replay log up to syncpt address specified;
 		 */
 		struct {
 			__le32 sync;	/* 4: syncpt address (0 = here) */
diff --git a/fs/jfs/jfs_metapage.h b/fs/jfs/jfs_metapage.h
index d94f8d9e87d7..a78beda85f68 100644
--- a/fs/jfs/jfs_metapage.h
+++ b/fs/jfs/jfs_metapage.h
@@ -75,7 +75,7 @@ extern void grab_metapage(struct metapage *);
 extern void force_metapage(struct metapage *);
 
 /*
- * hold_metapage and put_metapage are used in conjuction. The page lock
+ * hold_metapage and put_metapage are used in conjunction. The page lock
  * is not dropped between the two, so no other threads can get or release
  * the metapage
  */
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index 9466957ec841..f6cc0c09ec63 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -636,7 +636,7 @@ struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp,
 	 * the inode of the page and available to all anonymous
 	 * transactions until txCommit() time at which point
 	 * they are transferred to the transaction tlock list of
-	 * the commiting transaction of the inode)
+	 * the committing transaction of the inode)
 	 */
 	if (xtid == 0) {
 		tlck->tid = tid;
diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c
index 1aba0039f1c9..8ea5efb5a34e 100644
--- a/fs/jfs/resize.c
+++ b/fs/jfs/resize.c
@@ -57,7 +57,7 @@
  * 2. compute new FSCKSize from new LVSize;
  * 3. set new FSSize as MIN(FSSize, LVSize-(LogSize+FSCKSize)) where
  *    assert(new FSSize >= old FSSize),
- *    i.e., file system must not be shrinked;
+ *    i.e., file system must not be shrunk;
  */
 int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
 {
@@ -182,7 +182,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
 	 */
 	newFSSize = newLVSize - newLogSize - newFSCKSize;
 
-	/* file system cannot be shrinked */
+	/* file system cannot be shrunk */
 	if (newFSSize < bmp->db_mapsize) {
 		rc = -EINVAL;
 		goto out;
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index eeca48a031ab..06c8a67cbe76 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -644,7 +644,7 @@ static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
 
 /* Read data from quotafile - avoid pagecache and such because we cannot afford
  * acquiring the locks... As quota files are never truncated and quota code
- * itself serializes the operations (and noone else should touch the files)
+ * itself serializes the operations (and no one else should touch the files)
  * we don't have to be afraid of races */
 static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data,
 			      size_t len, loff_t off)