Diffstat (limited to 'fs/jfs')
-rw-r--r--  fs/jfs/acl.c            11
-rw-r--r--  fs/jfs/inode.c           4
-rw-r--r--  fs/jfs/jfs_acl.h         2
-rw-r--r--  fs/jfs/jfs_dmap.c      344
-rw-r--r--  fs/jfs/jfs_dtree.c     212
-rw-r--r--  fs/jfs/jfs_dtree.h       7
-rw-r--r--  fs/jfs/jfs_imap.c      105
-rw-r--r--  fs/jfs/jfs_logmgr.c     43
-rw-r--r--  fs/jfs/jfs_logmgr.h      2
-rw-r--r--  fs/jfs/jfs_metapage.c   11
-rw-r--r--  fs/jfs/jfs_txnmgr.c     20
-rw-r--r--  fs/jfs/jfs_unicode.c     7
-rw-r--r--  fs/jfs/jfs_xtree.c     340
-rw-r--r--  fs/jfs/jfs_xtree.h       6
-rw-r--r--  fs/jfs/super.c           5
-rw-r--r--  fs/jfs/symlink.c         4
-rw-r--r--  fs/jfs/xattr.c          13
17 files changed, 110 insertions, 1026 deletions
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index 30a2bf9eed..e892dab40c 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -21,6 +21,7 @@
21#include <linux/sched.h> 21#include <linux/sched.h>
22#include <linux/fs.h> 22#include <linux/fs.h>
23#include <linux/quotaops.h> 23#include <linux/quotaops.h>
24#include <linux/posix_acl_xattr.h>
24#include "jfs_incore.h" 25#include "jfs_incore.h"
25#include "jfs_xattr.h" 26#include "jfs_xattr.h"
26#include "jfs_acl.h" 27#include "jfs_acl.h"
@@ -36,11 +37,11 @@ static struct posix_acl *jfs_get_acl(struct inode *inode, int type)
36 37
37 switch(type) { 38 switch(type) {
38 case ACL_TYPE_ACCESS: 39 case ACL_TYPE_ACCESS:
39 ea_name = XATTR_NAME_ACL_ACCESS; 40 ea_name = POSIX_ACL_XATTR_ACCESS;
40 p_acl = &ji->i_acl; 41 p_acl = &ji->i_acl;
41 break; 42 break;
42 case ACL_TYPE_DEFAULT: 43 case ACL_TYPE_DEFAULT:
43 ea_name = XATTR_NAME_ACL_DEFAULT; 44 ea_name = POSIX_ACL_XATTR_DEFAULT;
44 p_acl = &ji->i_default_acl; 45 p_acl = &ji->i_default_acl;
45 break; 46 break;
46 default: 47 default:
@@ -88,11 +89,11 @@ static int jfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
88 89
89 switch(type) { 90 switch(type) {
90 case ACL_TYPE_ACCESS: 91 case ACL_TYPE_ACCESS:
91 ea_name = XATTR_NAME_ACL_ACCESS; 92 ea_name = POSIX_ACL_XATTR_ACCESS;
92 p_acl = &ji->i_acl; 93 p_acl = &ji->i_acl;
93 break; 94 break;
94 case ACL_TYPE_DEFAULT: 95 case ACL_TYPE_DEFAULT:
95 ea_name = XATTR_NAME_ACL_DEFAULT; 96 ea_name = POSIX_ACL_XATTR_DEFAULT;
96 p_acl = &ji->i_default_acl; 97 p_acl = &ji->i_default_acl;
97 if (!S_ISDIR(inode->i_mode)) 98 if (!S_ISDIR(inode->i_mode))
98 return acl ? -EACCES : 0; 99 return acl ? -EACCES : 0;
@@ -101,7 +102,7 @@ static int jfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
101 return -EINVAL; 102 return -EINVAL;
102 } 103 }
103 if (acl) { 104 if (acl) {
104 size = xattr_acl_size(acl->a_count); 105 size = posix_acl_xattr_size(acl->a_count);
105 value = kmalloc(size, GFP_KERNEL); 106 value = kmalloc(size, GFP_KERNEL);
106 if (!value) 107 if (!value)
107 return -ENOMEM; 108 return -ENOMEM;
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 2137138c59..767c7ecb42 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -128,6 +128,10 @@ void jfs_delete_inode(struct inode *inode)
128{ 128{
129 jfs_info("In jfs_delete_inode, inode = 0x%p", inode); 129 jfs_info("In jfs_delete_inode, inode = 0x%p", inode);
130 130
131 if (is_bad_inode(inode) ||
132 (JFS_IP(inode)->fileset != cpu_to_le32(FILESYSTEM_I)))
133 return;
134
131 if (test_cflag(COMMIT_Freewmap, inode)) 135 if (test_cflag(COMMIT_Freewmap, inode))
132 jfs_free_zero_link(inode); 136 jfs_free_zero_link(inode);
133 137
diff --git a/fs/jfs/jfs_acl.h b/fs/jfs/jfs_acl.h
index d2ae430ade..a3acd3eec0 100644
--- a/fs/jfs/jfs_acl.h
+++ b/fs/jfs/jfs_acl.h
@@ -20,8 +20,6 @@
20 20
21#ifdef CONFIG_JFS_POSIX_ACL 21#ifdef CONFIG_JFS_POSIX_ACL
22 22
23#include <linux/xattr_acl.h>
24
25int jfs_permission(struct inode *, int, struct nameidata *); 23int jfs_permission(struct inode *, int, struct nameidata *);
26int jfs_init_acl(struct inode *, struct inode *); 24int jfs_init_acl(struct inode *, struct inode *);
27int jfs_setattr(struct dentry *, struct iattr *); 25int jfs_setattr(struct dentry *, struct iattr *);
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index cced2fed9d..c739626f5b 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -26,36 +26,6 @@
26#include "jfs_debug.h" 26#include "jfs_debug.h"
27 27
28/* 28/*
29 * Debug code for double-checking block map
30 */
31/* #define _JFS_DEBUG_DMAP 1 */
32
33#ifdef _JFS_DEBUG_DMAP
34#define DBINITMAP(size,ipbmap,results) \
35 DBinitmap(size,ipbmap,results)
36#define DBALLOC(dbmap,mapsize,blkno,nblocks) \
37 DBAlloc(dbmap,mapsize,blkno,nblocks)
38#define DBFREE(dbmap,mapsize,blkno,nblocks) \
39 DBFree(dbmap,mapsize,blkno,nblocks)
40#define DBALLOCCK(dbmap,mapsize,blkno,nblocks) \
41 DBAllocCK(dbmap,mapsize,blkno,nblocks)
42#define DBFREECK(dbmap,mapsize,blkno,nblocks) \
43 DBFreeCK(dbmap,mapsize,blkno,nblocks)
44
45static void DBinitmap(s64, struct inode *, u32 **);
46static void DBAlloc(uint *, s64, s64, s64);
47static void DBFree(uint *, s64, s64, s64);
48static void DBAllocCK(uint *, s64, s64, s64);
49static void DBFreeCK(uint *, s64, s64, s64);
50#else
51#define DBINITMAP(size,ipbmap,results)
52#define DBALLOC(dbmap, mapsize, blkno, nblocks)
53#define DBFREE(dbmap, mapsize, blkno, nblocks)
54#define DBALLOCCK(dbmap, mapsize, blkno, nblocks)
55#define DBFREECK(dbmap, mapsize, blkno, nblocks)
56#endif /* _JFS_DEBUG_DMAP */
57
58/*
59 * SERIALIZATION of the Block Allocation Map. 29 * SERIALIZATION of the Block Allocation Map.
60 * 30 *
61 * the working state of the block allocation map is accessed in 31 * the working state of the block allocation map is accessed in
@@ -105,7 +75,7 @@ static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
105 int nblocks); 75 int nblocks);
106static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval); 76static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval);
107static void dbBackSplit(dmtree_t * tp, int leafno); 77static void dbBackSplit(dmtree_t * tp, int leafno);
108static void dbJoin(dmtree_t * tp, int leafno, int newval); 78static int dbJoin(dmtree_t * tp, int leafno, int newval);
109static void dbAdjTree(dmtree_t * tp, int leafno, int newval); 79static void dbAdjTree(dmtree_t * tp, int leafno, int newval);
110static int dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, 80static int dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc,
111 int level); 81 int level);
@@ -128,8 +98,8 @@ static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks);
128static int dbFindBits(u32 word, int l2nb); 98static int dbFindBits(u32 word, int l2nb);
129static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno); 99static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno);
130static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx); 100static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx);
131static void dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno, 101static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
132 int nblocks); 102 int nblocks);
133static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno, 103static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
134 int nblocks); 104 int nblocks);
135static int dbMaxBud(u8 * cp); 105static int dbMaxBud(u8 * cp);
@@ -242,7 +212,6 @@ int dbMount(struct inode *ipbmap)
242 JFS_SBI(ipbmap->i_sb)->bmap = bmp; 212 JFS_SBI(ipbmap->i_sb)->bmap = bmp;
243 213
244 memset(bmp->db_active, 0, sizeof(bmp->db_active)); 214 memset(bmp->db_active, 0, sizeof(bmp->db_active));
245 DBINITMAP(bmp->db_mapsize, ipbmap, &bmp->db_DBmap);
246 215
247 /* 216 /*
248 * allocate/initialize the bmap lock 217 * allocate/initialize the bmap lock
@@ -407,16 +376,13 @@ int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
407 */ 376 */
408 nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1))); 377 nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1)));
409 378
410 DBALLOCCK(bmp->db_DBmap, bmp->db_mapsize, blkno, nb);
411
412 /* free the blocks. */ 379 /* free the blocks. */
413 if ((rc = dbFreeDmap(bmp, dp, blkno, nb))) { 380 if ((rc = dbFreeDmap(bmp, dp, blkno, nb))) {
381 jfs_error(ip->i_sb, "dbFree: error in block map\n");
414 release_metapage(mp); 382 release_metapage(mp);
415 IREAD_UNLOCK(ipbmap); 383 IREAD_UNLOCK(ipbmap);
416 return (rc); 384 return (rc);
417 } 385 }
418
419 DBFREE(bmp->db_DBmap, bmp->db_mapsize, blkno, nb);
420 } 386 }
421 387
422 /* write the last buffer. */ 388 /* write the last buffer. */
@@ -775,10 +741,6 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
775 IWRITE_LOCK(ipbmap); 741 IWRITE_LOCK(ipbmap);
776 742
777 rc = dbAllocAny(bmp, nblocks, l2nb, results); 743 rc = dbAllocAny(bmp, nblocks, l2nb, results);
778 if (rc == 0) {
779 DBALLOC(bmp->db_DBmap, bmp->db_mapsize, *results,
780 nblocks);
781 }
782 744
783 goto write_unlock; 745 goto write_unlock;
784 } 746 }
@@ -836,8 +798,6 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
836 != -ENOSPC) { 798 != -ENOSPC) {
837 if (rc == 0) { 799 if (rc == 0) {
838 *results = blkno; 800 *results = blkno;
839 DBALLOC(bmp->db_DBmap, bmp->db_mapsize,
840 *results, nblocks);
841 mark_metapage_dirty(mp); 801 mark_metapage_dirty(mp);
842 } 802 }
843 803
@@ -863,11 +823,8 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
863 if ((rc = 823 if ((rc =
864 dbAllocNear(bmp, dp, blkno, (int) nblocks, l2nb, results)) 824 dbAllocNear(bmp, dp, blkno, (int) nblocks, l2nb, results))
865 != -ENOSPC) { 825 != -ENOSPC) {
866 if (rc == 0) { 826 if (rc == 0)
867 DBALLOC(bmp->db_DBmap, bmp->db_mapsize,
868 *results, nblocks);
869 mark_metapage_dirty(mp); 827 mark_metapage_dirty(mp);
870 }
871 828
872 release_metapage(mp); 829 release_metapage(mp);
873 goto read_unlock; 830 goto read_unlock;
@@ -878,11 +835,8 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
878 */ 835 */
879 if ((rc = dbAllocDmapLev(bmp, dp, (int) nblocks, l2nb, results)) 836 if ((rc = dbAllocDmapLev(bmp, dp, (int) nblocks, l2nb, results))
880 != -ENOSPC) { 837 != -ENOSPC) {
881 if (rc == 0) { 838 if (rc == 0)
882 DBALLOC(bmp->db_DBmap, bmp->db_mapsize,
883 *results, nblocks);
884 mark_metapage_dirty(mp); 839 mark_metapage_dirty(mp);
885 }
886 840
887 release_metapage(mp); 841 release_metapage(mp);
888 goto read_unlock; 842 goto read_unlock;
@@ -896,13 +850,9 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
896 * the same allocation group as the hint. 850 * the same allocation group as the hint.
897 */ 851 */
898 IWRITE_LOCK(ipbmap); 852 IWRITE_LOCK(ipbmap);
899 if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) 853 if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) != -ENOSPC)
900 != -ENOSPC) {
901 if (rc == 0)
902 DBALLOC(bmp->db_DBmap, bmp->db_mapsize,
903 *results, nblocks);
904 goto write_unlock; 854 goto write_unlock;
905 } 855
906 IWRITE_UNLOCK(ipbmap); 856 IWRITE_UNLOCK(ipbmap);
907 857
908 858
@@ -918,9 +868,6 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
918 */ 868 */
919 if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) == -ENOSPC) 869 if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) == -ENOSPC)
920 rc = dbAllocAny(bmp, nblocks, l2nb, results); 870 rc = dbAllocAny(bmp, nblocks, l2nb, results);
921 if (rc == 0) {
922 DBALLOC(bmp->db_DBmap, bmp->db_mapsize, *results, nblocks);
923 }
924 871
925 write_unlock: 872 write_unlock:
926 IWRITE_UNLOCK(ipbmap); 873 IWRITE_UNLOCK(ipbmap);
@@ -992,10 +939,9 @@ int dbAllocExact(struct inode *ip, s64 blkno, int nblocks)
992 939
993 IREAD_UNLOCK(ipbmap); 940 IREAD_UNLOCK(ipbmap);
994 941
995 if (rc == 0) { 942 if (rc == 0)
996 DBALLOC(bmp->db_DBmap, bmp->db_mapsize, blkno, nblocks);
997 mark_metapage_dirty(mp); 943 mark_metapage_dirty(mp);
998 } 944
999 release_metapage(mp); 945 release_metapage(mp);
1000 946
1001 return (rc); 947 return (rc);
@@ -1144,7 +1090,6 @@ static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks)
1144 return -EIO; 1090 return -EIO;
1145 } 1091 }
1146 1092
1147 DBALLOCCK(bmp->db_DBmap, bmp->db_mapsize, blkno, nblocks);
1148 dp = (struct dmap *) mp->data; 1093 dp = (struct dmap *) mp->data;
1149 1094
1150 /* try to allocate the blocks immediately following the 1095 /* try to allocate the blocks immediately following the
@@ -1155,11 +1100,9 @@ static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks)
1155 IREAD_UNLOCK(ipbmap); 1100 IREAD_UNLOCK(ipbmap);
1156 1101
1157 /* were we successful ? */ 1102 /* were we successful ? */
1158 if (rc == 0) { 1103 if (rc == 0)
1159 DBALLOC(bmp->db_DBmap, bmp->db_mapsize, extblkno,
1160 addnblocks);
1161 write_metapage(mp); 1104 write_metapage(mp);
1162 } else 1105 else
1163 /* we were not successful */ 1106 /* we were not successful */
1164 release_metapage(mp); 1107 release_metapage(mp);
1165 1108
@@ -2078,7 +2021,7 @@ static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
2078 int nblocks) 2021 int nblocks)
2079{ 2022{
2080 s8 oldroot; 2023 s8 oldroot;
2081 int rc, word; 2024 int rc = 0, word;
2082 2025
2083 /* save the current value of the root (i.e. maximum free string) 2026 /* save the current value of the root (i.e. maximum free string)
2084 * of the dmap tree. 2027 * of the dmap tree.
@@ -2086,11 +2029,11 @@ static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
2086 oldroot = dp->tree.stree[ROOT]; 2029 oldroot = dp->tree.stree[ROOT];
2087 2030
2088 /* free the specified (blocks) bits */ 2031 /* free the specified (blocks) bits */
2089 dbFreeBits(bmp, dp, blkno, nblocks); 2032 rc = dbFreeBits(bmp, dp, blkno, nblocks);
2090 2033
2091 /* if the root has not changed, done. */ 2034 /* if error or the root has not changed, done. */
2092 if (dp->tree.stree[ROOT] == oldroot) 2035 if (rc || (dp->tree.stree[ROOT] == oldroot))
2093 return (0); 2036 return (rc);
2094 2037
2095 /* root changed. bubble the change up to the dmap control pages. 2038 /* root changed. bubble the change up to the dmap control pages.
2096 * if the adjustment of the upper level control pages fails, 2039 * if the adjustment of the upper level control pages fails,
@@ -2279,15 +2222,16 @@ static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
2279 * blkno - starting block number of the bits to be freed. 2222 * blkno - starting block number of the bits to be freed.
2280 * nblocks - number of bits to be freed. 2223 * nblocks - number of bits to be freed.
2281 * 2224 *
2282 * RETURN VALUES: none 2225 * RETURN VALUES: 0 for success
2283 * 2226 *
2284 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit; 2227 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
2285 */ 2228 */
2286static void dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno, 2229static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
2287 int nblocks) 2230 int nblocks)
2288{ 2231{
2289 int dbitno, word, rembits, nb, nwords, wbitno, nw, agno; 2232 int dbitno, word, rembits, nb, nwords, wbitno, nw, agno;
2290 dmtree_t *tp = (dmtree_t *) & dp->tree; 2233 dmtree_t *tp = (dmtree_t *) & dp->tree;
2234 int rc = 0;
2291 int size; 2235 int size;
2292 2236
2293 /* determine the bit number and word within the dmap of the 2237 /* determine the bit number and word within the dmap of the
@@ -2336,8 +2280,10 @@ static void dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
2336 2280
2337 /* update the leaf for this dmap word. 2281 /* update the leaf for this dmap word.
2338 */ 2282 */
2339 dbJoin(tp, word, 2283 rc = dbJoin(tp, word,
2340 dbMaxBud((u8 *) & dp->wmap[word])); 2284 dbMaxBud((u8 *) & dp->wmap[word]));
2285 if (rc)
2286 return rc;
2341 2287
2342 word += 1; 2288 word += 1;
2343 } else { 2289 } else {
@@ -2368,7 +2314,9 @@ static void dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
2368 2314
2369 /* update the leaf. 2315 /* update the leaf.
2370 */ 2316 */
2371 dbJoin(tp, word, size); 2317 rc = dbJoin(tp, word, size);
2318 if (rc)
2319 return rc;
2372 2320
2373 /* get the number of dmap words handled. 2321 /* get the number of dmap words handled.
2374 */ 2322 */
@@ -2415,6 +2363,8 @@ static void dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
2415 } 2363 }
2416 2364
2417 BMAP_UNLOCK(bmp); 2365 BMAP_UNLOCK(bmp);
2366
2367 return 0;
2418} 2368}
2419 2369
2420 2370
@@ -2522,7 +2472,9 @@ dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
2522 } 2472 }
2523 dbSplit((dmtree_t *) dcp, leafno, dcp->budmin, newval); 2473 dbSplit((dmtree_t *) dcp, leafno, dcp->budmin, newval);
2524 } else { 2474 } else {
2525 dbJoin((dmtree_t *) dcp, leafno, newval); 2475 rc = dbJoin((dmtree_t *) dcp, leafno, newval);
2476 if (rc)
2477 return rc;
2526 } 2478 }
2527 2479
2528 /* check if the root of the current dmap control page changed due 2480 /* check if the root of the current dmap control page changed due
@@ -2747,7 +2699,7 @@ static void dbBackSplit(dmtree_t * tp, int leafno)
2747 * 2699 *
2748 * RETURN VALUES: none 2700 * RETURN VALUES: none
2749 */ 2701 */
2750static void dbJoin(dmtree_t * tp, int leafno, int newval) 2702static int dbJoin(dmtree_t * tp, int leafno, int newval)
2751{ 2703{
2752 int budsz, buddy; 2704 int budsz, buddy;
2753 s8 *leaf; 2705 s8 *leaf;
@@ -2787,7 +2739,9 @@ static void dbJoin(dmtree_t * tp, int leafno, int newval)
2787 if (newval > leaf[buddy]) 2739 if (newval > leaf[buddy])
2788 break; 2740 break;
2789 2741
2790 assert(newval == leaf[buddy]); 2742 /* It shouldn't be less */
2743 if (newval < leaf[buddy])
2744 return -EIO;
2791 2745
2792 /* check which (leafno or buddy) is the left buddy. 2746 /* check which (leafno or buddy) is the left buddy.
2793 * the left buddy gets to claim the blocks resulting 2747 * the left buddy gets to claim the blocks resulting
@@ -2819,6 +2773,8 @@ static void dbJoin(dmtree_t * tp, int leafno, int newval)
2819 /* update the leaf value. 2773 /* update the leaf value.
2820 */ 2774 */
2821 dbAdjTree(tp, leafno, newval); 2775 dbAdjTree(tp, leafno, newval);
2776
2777 return 0;
2822} 2778}
2823 2779
2824 2780
@@ -3185,16 +3141,12 @@ int dbAllocBottomUp(struct inode *ip, s64 blkno, s64 nblocks)
3185 */ 3141 */
3186 nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1))); 3142 nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1)));
3187 3143
3188 DBFREECK(bmp->db_DBmap, bmp->db_mapsize, blkno, nb);
3189
3190 /* allocate the blocks. */ 3144 /* allocate the blocks. */
3191 if ((rc = dbAllocDmapBU(bmp, dp, blkno, nb))) { 3145 if ((rc = dbAllocDmapBU(bmp, dp, blkno, nb))) {
3192 release_metapage(mp); 3146 release_metapage(mp);
3193 IREAD_UNLOCK(ipbmap); 3147 IREAD_UNLOCK(ipbmap);
3194 return (rc); 3148 return (rc);
3195 } 3149 }
3196
3197 DBALLOC(bmp->db_DBmap, bmp->db_mapsize, blkno, nb);
3198 } 3150 }
3199 3151
3200 /* write the last buffer. */ 3152 /* write the last buffer. */
@@ -4041,223 +3993,3 @@ s64 dbMapFileSizeToMapSize(struct inode * ipbmap)
4041 3993
4042 return (nblocks); 3994 return (nblocks);
4043} 3995}
4044
4045
4046#ifdef _JFS_DEBUG_DMAP
4047/*
4048 * DBinitmap()
4049 */
4050static void DBinitmap(s64 size, struct inode *ipbmap, u32 ** results)
4051{
4052 int npages;
4053 u32 *dbmap, *d;
4054 int n;
4055 s64 lblkno, cur_block;
4056 struct dmap *dp;
4057 struct metapage *mp;
4058
4059 npages = size / 32768;
4060 npages += (size % 32768) ? 1 : 0;
4061
4062 dbmap = (u32 *) xmalloc(npages * 4096, L2PSIZE, kernel_heap);
4063 if (dbmap == NULL)
4064 BUG(); /* Not robust since this is only unused debug code */
4065
4066 for (n = 0, d = dbmap; n < npages; n++, d += 1024)
4067 bzero(d, 4096);
4068
4069 /* Need to initialize from disk map pages
4070 */
4071 for (d = dbmap, cur_block = 0; cur_block < size;
4072 cur_block += BPERDMAP, d += LPERDMAP) {
4073 lblkno = BLKTODMAP(cur_block,
4074 JFS_SBI(ipbmap->i_sb)->bmap->
4075 db_l2nbperpage);
4076 mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
4077 if (mp == NULL) {
4078 jfs_error(ipbmap->i_sb,
4079 "DBinitmap: could not read disk map page");
4080 continue;
4081 }
4082 dp = (struct dmap *) mp->data;
4083
4084 for (n = 0; n < LPERDMAP; n++)
4085 d[n] = le32_to_cpu(dp->wmap[n]);
4086
4087 release_metapage(mp);
4088 }
4089
4090 *results = dbmap;
4091}
4092
4093
4094/*
4095 * DBAlloc()
4096 */
4097void DBAlloc(uint * dbmap, s64 mapsize, s64 blkno, s64 nblocks)
4098{
4099 int word, nb, bitno;
4100 u32 mask;
4101
4102 assert(blkno > 0 && blkno < mapsize);
4103 assert(nblocks > 0 && nblocks <= mapsize);
4104
4105 assert(blkno + nblocks <= mapsize);
4106
4107 dbmap += (blkno / 32);
4108 while (nblocks > 0) {
4109 bitno = blkno & (32 - 1);
4110 nb = min(nblocks, 32 - bitno);
4111
4112 mask = (0xffffffff << (32 - nb) >> bitno);
4113 assert((mask & *dbmap) == 0);
4114 *dbmap |= mask;
4115
4116 dbmap++;
4117 blkno += nb;
4118 nblocks -= nb;
4119 }
4120}
4121
4122
4123/*
4124 * DBFree()
4125 */
4126static void DBFree(uint * dbmap, s64 mapsize, s64 blkno, s64 nblocks)
4127{
4128 int word, nb, bitno;
4129 u32 mask;
4130
4131 assert(blkno > 0 && blkno < mapsize);
4132 assert(nblocks > 0 && nblocks <= mapsize);
4133
4134 assert(blkno + nblocks <= mapsize);
4135
4136 dbmap += (blkno / 32);
4137 while (nblocks > 0) {
4138 bitno = blkno & (32 - 1);
4139 nb = min(nblocks, 32 - bitno);
4140
4141 mask = (0xffffffff << (32 - nb) >> bitno);
4142 assert((mask & *dbmap) == mask);
4143 *dbmap &= ~mask;
4144
4145 dbmap++;
4146 blkno += nb;
4147 nblocks -= nb;
4148 }
4149}
4150
4151
4152/*
4153 * DBAllocCK()
4154 */
4155static void DBAllocCK(uint * dbmap, s64 mapsize, s64 blkno, s64 nblocks)
4156{
4157 int word, nb, bitno;
4158 u32 mask;
4159
4160 assert(blkno > 0 && blkno < mapsize);
4161 assert(nblocks > 0 && nblocks <= mapsize);
4162
4163 assert(blkno + nblocks <= mapsize);
4164
4165 dbmap += (blkno / 32);
4166 while (nblocks > 0) {
4167 bitno = blkno & (32 - 1);
4168 nb = min(nblocks, 32 - bitno);
4169
4170 mask = (0xffffffff << (32 - nb) >> bitno);
4171 assert((mask & *dbmap) == mask);
4172
4173 dbmap++;
4174 blkno += nb;
4175 nblocks -= nb;
4176 }
4177}
4178
4179
4180/*
4181 * DBFreeCK()
4182 */
4183static void DBFreeCK(uint * dbmap, s64 mapsize, s64 blkno, s64 nblocks)
4184{
4185 int word, nb, bitno;
4186 u32 mask;
4187
4188 assert(blkno > 0 && blkno < mapsize);
4189 assert(nblocks > 0 && nblocks <= mapsize);
4190
4191 assert(blkno + nblocks <= mapsize);
4192
4193 dbmap += (blkno / 32);
4194 while (nblocks > 0) {
4195 bitno = blkno & (32 - 1);
4196 nb = min(nblocks, 32 - bitno);
4197
4198 mask = (0xffffffff << (32 - nb) >> bitno);
4199 assert((mask & *dbmap) == 0);
4200
4201 dbmap++;
4202 blkno += nb;
4203 nblocks -= nb;
4204 }
4205}
4206
4207
4208/*
4209 * dbPrtMap()
4210 */
4211static void dbPrtMap(struct bmap * bmp)
4212{
4213 printk(" mapsize: %d%d\n", bmp->db_mapsize);
4214 printk(" nfree: %d%d\n", bmp->db_nfree);
4215 printk(" numag: %d\n", bmp->db_numag);
4216 printk(" agsize: %d%d\n", bmp->db_agsize);
4217 printk(" agl2size: %d\n", bmp->db_agl2size);
4218 printk(" agwidth: %d\n", bmp->db_agwidth);
4219 printk(" agstart: %d\n", bmp->db_agstart);
4220 printk(" agheigth: %d\n", bmp->db_agheigth);
4221 printk(" aglevel: %d\n", bmp->db_aglevel);
4222 printk(" maxlevel: %d\n", bmp->db_maxlevel);
4223 printk(" maxag: %d\n", bmp->db_maxag);
4224 printk(" agpref: %d\n", bmp->db_agpref);
4225 printk(" l2nbppg: %d\n", bmp->db_l2nbperpage);
4226}
4227
4228
4229/*
4230 * dbPrtCtl()
4231 */
4232static void dbPrtCtl(struct dmapctl * dcp)
4233{
4234 int i, j, n;
4235
4236 printk(" height: %08x\n", le32_to_cpu(dcp->height));
4237 printk(" leafidx: %08x\n", le32_to_cpu(dcp->leafidx));
4238 printk(" budmin: %08x\n", dcp->budmin);
4239 printk(" nleafs: %08x\n", le32_to_cpu(dcp->nleafs));
4240 printk(" l2nleafs: %08x\n", le32_to_cpu(dcp->l2nleafs));
4241
4242 printk("\n Tree:\n");
4243 for (i = 0; i < CTLLEAFIND; i += 8) {
4244 n = min(8, CTLLEAFIND - i);
4245
4246 for (j = 0; j < n; j++)
4247 printf(" [%03x]: %02x", i + j,
4248 (char) dcp->stree[i + j]);
4249 printf("\n");
4250 }
4251
4252 printk("\n Tree Leaves:\n");
4253 for (i = 0; i < LPERCTL; i += 8) {
4254 n = min(8, LPERCTL - i);
4255
4256 for (j = 0; j < n; j++)
4257 printf(" [%03x]: %02x",
4258 i + j,
4259 (char) dcp->stree[i + j + CTLLEAFIND]);
4260 printf("\n");
4261 }
4262}
4263#endif /* _JFS_DEBUG_DMAP */
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index 8676aee3ae..404f33eae5 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -381,9 +381,12 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
381 * It's time to move the inline table to an external 381 * It's time to move the inline table to an external
382 * page and begin to build the xtree 382 * page and begin to build the xtree
383 */ 383 */
384 if (DQUOT_ALLOC_BLOCK(ip, sbi->nbperpage) || 384 if (DQUOT_ALLOC_BLOCK(ip, sbi->nbperpage))
385 dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) 385 goto clean_up;
386 goto clean_up; /* No space */ 386 if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) {
387 DQUOT_FREE_BLOCK(ip, sbi->nbperpage);
388 goto clean_up;
389 }
387 390
388 /* 391 /*
389 * Save the table, we're going to overwrite it with the 392 * Save the table, we're going to overwrite it with the
@@ -397,13 +400,15 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
397 xtInitRoot(tid, ip); 400 xtInitRoot(tid, ip);
398 401
399 /* 402 /*
400 * Allocate the first block & add it to the xtree 403 * Add the first block to the xtree
401 */ 404 */
402 if (xtInsert(tid, ip, 0, 0, sbi->nbperpage, &xaddr, 0)) { 405 if (xtInsert(tid, ip, 0, 0, sbi->nbperpage, &xaddr, 0)) {
403 /* This really shouldn't fail */ 406 /* This really shouldn't fail */
404 jfs_warn("add_index: xtInsert failed!"); 407 jfs_warn("add_index: xtInsert failed!");
405 memcpy(&jfs_ip->i_dirtable, temp_table, 408 memcpy(&jfs_ip->i_dirtable, temp_table,
406 sizeof (temp_table)); 409 sizeof (temp_table));
410 dbFree(ip, xaddr, sbi->nbperpage);
411 DQUOT_FREE_BLOCK(ip, sbi->nbperpage);
407 goto clean_up; 412 goto clean_up;
408 } 413 }
409 ip->i_size = PSIZE; 414 ip->i_size = PSIZE;
@@ -4554,202 +4559,3 @@ int dtModify(tid_t tid, struct inode *ip,
4554 4559
4555 return 0; 4560 return 0;
4556} 4561}
4557
4558#ifdef _JFS_DEBUG_DTREE
4559/*
4560 * dtDisplayTree()
4561 *
4562 * function: traverse forward
4563 */
4564int dtDisplayTree(struct inode *ip)
4565{
4566 int rc;
4567 struct metapage *mp;
4568 dtpage_t *p;
4569 s64 bn, pbn;
4570 int index, lastindex, v, h;
4571 pxd_t *xd;
4572 struct btstack btstack;
4573 struct btframe *btsp;
4574 struct btframe *parent;
4575 u8 *stbl;
4576 int psize = 256;
4577
4578 printk("display B+-tree.\n");
4579
4580 /* clear stack */
4581 btsp = btstack.stack;
4582
4583 /*
4584 * start with root
4585 *
4586 * root resides in the inode
4587 */
4588 bn = 0;
4589 v = h = 0;
4590
4591 /*
4592 * first access of each page:
4593 */
4594 newPage:
4595 DT_GETPAGE(ip, bn, mp, psize, p, rc);
4596 if (rc)
4597 return rc;
4598
4599 /* process entries forward from first index */
4600 index = 0;
4601 lastindex = p->header.nextindex - 1;
4602
4603 if (p->header.flag & BT_INTERNAL) {
4604 /*
4605 * first access of each internal page
4606 */
4607 printf("internal page ");
4608 dtDisplayPage(ip, bn, p);
4609
4610 goto getChild;
4611 } else { /* (p->header.flag & BT_LEAF) */
4612
4613 /*
4614 * first access of each leaf page
4615 */
4616 printf("leaf page ");
4617 dtDisplayPage(ip, bn, p);
4618
4619 /*
4620 * process leaf page entries
4621 *
4622 for ( ; index <= lastindex; index++)
4623 {
4624 }
4625 */
4626
4627 /* unpin the leaf page */
4628 DT_PUTPAGE(mp);
4629 }
4630
4631 /*
4632 * go back up to the parent page
4633 */
4634 getParent:
4635 /* pop/restore parent entry for the current child page */
4636 if ((parent = (btsp == btstack.stack ? NULL : --btsp)) == NULL)
4637 /* current page must have been root */
4638 return;
4639
4640 /*
4641 * parent page scan completed
4642 */
4643 if ((index = parent->index) == (lastindex = parent->lastindex)) {
4644 /* go back up to the parent page */
4645 goto getParent;
4646 }
4647
4648 /*
4649 * parent page has entries remaining
4650 */
4651 /* get back the parent page */
4652 bn = parent->bn;
4653 /* v = parent->level; */
4654 DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
4655 if (rc)
4656 return rc;
4657
4658 /* get next parent entry */
4659 index++;
4660
4661 /*
4662 * internal page: go down to child page of current entry
4663 */
4664 getChild:
4665 /* push/save current parent entry for the child page */
4666 btsp->bn = pbn = bn;
4667 btsp->index = index;
4668 btsp->lastindex = lastindex;
4669 /* btsp->level = v; */
4670 /* btsp->node = h; */
4671 ++btsp;
4672
4673 /* get current entry for the child page */
4674 stbl = DT_GETSTBL(p);
4675 xd = (pxd_t *) & p->slot[stbl[index]];
4676
4677 /*
4678 * first access of each internal entry:
4679 */
4680
4681 /* get child page */
4682 bn = addressPXD(xd);
4683 psize = lengthPXD(xd) << ip->i_ipmnt->i_l2bsize;
4684
4685 printk("traverse down 0x%Lx[%d]->0x%Lx\n", pbn, index, bn);
4686 v++;
4687 h = index;
4688
4689 /* release parent page */
4690 DT_PUTPAGE(mp);
4691
4692 /* process the child page */
4693 goto newPage;
4694}
4695
4696
4697/*
4698 * dtDisplayPage()
4699 *
4700 * function: display page
4701 */
4702int dtDisplayPage(struct inode *ip, s64 bn, dtpage_t * p)
4703{
4704 int rc;
4705 struct metapage *mp;
4706 struct ldtentry *lh;
4707 struct idtentry *ih;
4708 pxd_t *xd;
4709 int i, j;
4710 u8 *stbl;
4711 wchar_t name[JFS_NAME_MAX + 1];
4712 struct component_name key = { 0, name };
4713 int freepage = 0;
4714
4715 if (p == NULL) {
4716 freepage = 1;
4717 DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
4718 if (rc)
4719 return rc;
4720 }
4721
4722 /* display page control */
4723 printk("bn:0x%Lx flag:0x%08x nextindex:%d\n",
4724 bn, p->header.flag, p->header.nextindex);
4725
4726 /* display entries */
4727 stbl = DT_GETSTBL(p);
4728 for (i = 0, j = 1; i < p->header.nextindex; i++, j++) {
4729 dtGetKey(p, i, &key, JFS_SBI(ip->i_sb)->mntflag);
4730 key.name[key.namlen] = '\0';
4731 if (p->header.flag & BT_LEAF) {
4732 lh = (struct ldtentry *) & p->slot[stbl[i]];
4733 printf("\t[%d] %s:%d", i, key.name,
4734 le32_to_cpu(lh->inumber));
4735 } else {
4736 ih = (struct idtentry *) & p->slot[stbl[i]];
4737 xd = (pxd_t *) ih;
4738 bn = addressPXD(xd);
4739 printf("\t[%d] %s:0x%Lx", i, key.name, bn);
4740 }
4741
4742 if (j == 4) {
4743 printf("\n");
4744 j = 0;
4745 }
4746 }
4747
4748 printf("\n");
4749
4750 if (freepage)
4751 DT_PUTPAGE(mp);
4752
4753 return 0;
4754}
4755#endif /* _JFS_DEBUG_DTREE */
diff --git a/fs/jfs/jfs_dtree.h b/fs/jfs/jfs_dtree.h
index 273a80130c..13e4fdf077 100644
--- a/fs/jfs/jfs_dtree.h
+++ b/fs/jfs/jfs_dtree.h
@@ -269,11 +269,4 @@ extern int dtModify(tid_t tid, struct inode *ip, struct component_name * key,
269 ino_t * orig_ino, ino_t new_ino, int flag); 269 ino_t * orig_ino, ino_t new_ino, int flag);
270 270
271extern int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir); 271extern int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir);
272
273#ifdef _JFS_DEBUG_DTREE
274extern int dtDisplayTree(struct inode *ip);
275
276extern int dtDisplayPage(struct inode *ip, s64 bn, dtpage_t * p);
277#endif /* _JFS_DEBUG_DTREE */
278
279#endif /* !_H_JFS_DTREE */ 272#endif /* !_H_JFS_DTREE */
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index 971af2977e..4021d46da7 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -87,25 +87,6 @@ static int copy_from_dinode(struct dinode *, struct inode *);
87static void copy_to_dinode(struct dinode *, struct inode *); 87static void copy_to_dinode(struct dinode *, struct inode *);
88 88
89/* 89/*
90 * debug code for double-checking inode map
91 */
92/* #define _JFS_DEBUG_IMAP 1 */
93
94#ifdef _JFS_DEBUG_IMAP
95#define DBG_DIINIT(imap) DBGdiInit(imap)
96#define DBG_DIALLOC(imap, ino) DBGdiAlloc(imap, ino)
97#define DBG_DIFREE(imap, ino) DBGdiFree(imap, ino)
98
99static void *DBGdiInit(struct inomap * imap);
100static void DBGdiAlloc(struct inomap * imap, ino_t ino);
101static void DBGdiFree(struct inomap * imap, ino_t ino);
102#else
103#define DBG_DIINIT(imap)
104#define DBG_DIALLOC(imap, ino)
105#define DBG_DIFREE(imap, ino)
106#endif /* _JFS_DEBUG_IMAP */
107
108/*
109 * NAME: diMount() 90 * NAME: diMount()
110 * 91 *
111 * FUNCTION: initialize the incore inode map control structures for 92 * FUNCTION: initialize the incore inode map control structures for
@@ -188,8 +169,6 @@ int diMount(struct inode *ipimap)
188 imap->im_ipimap = ipimap; 169 imap->im_ipimap = ipimap;
189 JFS_IP(ipimap)->i_imap = imap; 170 JFS_IP(ipimap)->i_imap = imap;
190 171
191// DBG_DIINIT(imap);
192
193 return (0); 172 return (0);
194} 173}
195 174
@@ -1043,7 +1022,6 @@ int diFree(struct inode *ip)
1043 /* update the bitmap. 1022 /* update the bitmap.
1044 */ 1023 */
1045 iagp->wmap[extno] = cpu_to_le32(bitmap); 1024 iagp->wmap[extno] = cpu_to_le32(bitmap);
1046 DBG_DIFREE(imap, inum);
1047 1025
1048 /* update the free inode counts at the iag, ag and 1026 /* update the free inode counts at the iag, ag and
1049 * map level. 1027 * map level.
@@ -1231,7 +1209,6 @@ int diFree(struct inode *ip)
1231 jfs_error(ip->i_sb, "diFree: the pmap does not show inode free"); 1209 jfs_error(ip->i_sb, "diFree: the pmap does not show inode free");
1232 } 1210 }
1233 iagp->wmap[extno] = 0; 1211 iagp->wmap[extno] = 0;
1234 DBG_DIFREE(imap, inum);
1235 PXDlength(&iagp->inoext[extno], 0); 1212 PXDlength(&iagp->inoext[extno], 0);
1236 PXDaddress(&iagp->inoext[extno], 0); 1213 PXDaddress(&iagp->inoext[extno], 0);
1237 1214
@@ -1350,7 +1327,6 @@ diInitInode(struct inode *ip, int iagno, int ino, int extno, struct iag * iagp)
1350 struct jfs_inode_info *jfs_ip = JFS_IP(ip); 1327 struct jfs_inode_info *jfs_ip = JFS_IP(ip);
1351 1328
1352 ip->i_ino = (iagno << L2INOSPERIAG) + ino; 1329 ip->i_ino = (iagno << L2INOSPERIAG) + ino;
1353 DBG_DIALLOC(JFS_IP(ipimap)->i_imap, ip->i_ino);
1354 jfs_ip->ixpxd = iagp->inoext[extno]; 1330 jfs_ip->ixpxd = iagp->inoext[extno];
1355 jfs_ip->agno = BLKTOAG(le64_to_cpu(iagp->agstart), sbi); 1331 jfs_ip->agno = BLKTOAG(le64_to_cpu(iagp->agstart), sbi);
1356 jfs_ip->active_ag = -1; 1332 jfs_ip->active_ag = -1;
@@ -3185,84 +3161,3 @@ static void copy_to_dinode(struct dinode * dip, struct inode *ip)
3185 if (S_ISCHR(ip->i_mode) || S_ISBLK(ip->i_mode)) 3161 if (S_ISCHR(ip->i_mode) || S_ISBLK(ip->i_mode))
3186 dip->di_rdev = cpu_to_le32(jfs_ip->dev); 3162 dip->di_rdev = cpu_to_le32(jfs_ip->dev);
3187} 3163}
3188
3189#ifdef _JFS_DEBUG_IMAP
3190/*
3191 * DBGdiInit()
3192 */
3193static void *DBGdiInit(struct inomap * imap)
3194{
3195 u32 *dimap;
3196 int size;
3197 size = 64 * 1024;
3198 if ((dimap = (u32 *) xmalloc(size, L2PSIZE, kernel_heap)) == NULL)
3199 assert(0);
3200 bzero((void *) dimap, size);
3201 imap->im_DBGdimap = dimap;
3202}
3203
3204/*
3205 * DBGdiAlloc()
3206 */
3207static void DBGdiAlloc(struct inomap * imap, ino_t ino)
3208{
3209 u32 *dimap = imap->im_DBGdimap;
3210 int w, b;
3211 u32 m;
3212 w = ino >> 5;
3213 b = ino & 31;
3214 m = 0x80000000 >> b;
3215 assert(w < 64 * 256);
3216 if (dimap[w] & m) {
3217 printk("DEBUG diAlloc: duplicate alloc ino:0x%x\n", ino);
3218 }
3219 dimap[w] |= m;
3220}
3221
3222/*
3223 * DBGdiFree()
3224 */
3225static void DBGdiFree(struct inomap * imap, ino_t ino)
3226{
3227 u32 *dimap = imap->im_DBGdimap;
3228 int w, b;
3229 u32 m;
3230 w = ino >> 5;
3231 b = ino & 31;
3232 m = 0x80000000 >> b;
3233 assert(w < 64 * 256);
3234 if ((dimap[w] & m) == 0) {
3235 printk("DEBUG diFree: duplicate free ino:0x%x\n", ino);
3236 }
3237 dimap[w] &= ~m;
3238}
3239
3240static void dump_cp(struct inomap * ipimap, char *function, int line)
3241{
3242 printk("\n* ********* *\nControl Page %s %d\n", function, line);
3243 printk("FreeIAG %d\tNextIAG %d\n", ipimap->im_freeiag,
3244 ipimap->im_nextiag);
3245 printk("NumInos %d\tNumFree %d\n",
3246 atomic_read(&ipimap->im_numinos),
3247 atomic_read(&ipimap->im_numfree));
3248 printk("AG InoFree %d\tAG ExtFree %d\n",
3249 ipimap->im_agctl[0].inofree, ipimap->im_agctl[0].extfree);
3250 printk("AG NumInos %d\tAG NumFree %d\n",
3251 ipimap->im_agctl[0].numinos, ipimap->im_agctl[0].numfree);
3252}
3253
3254static void dump_iag(struct iag * iag, char *function, int line)
3255{
3256 printk("\n* ********* *\nIAG %s %d\n", function, line);
3257 printk("IagNum %d\tIAG Free %d\n", le32_to_cpu(iag->iagnum),
3258 le32_to_cpu(iag->iagfree));
3259 printk("InoFreeFwd %d\tInoFreeBack %d\n",
3260 le32_to_cpu(iag->inofreefwd),
3261 le32_to_cpu(iag->inofreeback));
3262 printk("ExtFreeFwd %d\tExtFreeBack %d\n",
3263 le32_to_cpu(iag->extfreefwd),
3264 le32_to_cpu(iag->extfreeback));
3265 printk("NFreeInos %d\tNFreeExts %d\n", le32_to_cpu(iag->nfreeinos),
3266 le32_to_cpu(iag->nfreeexts));
3267}
3268#endif /* _JFS_DEBUG_IMAP */
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 7c8387ed41..d27bac6aca 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -191,7 +191,7 @@ static int lbmIOWait(struct lbuf * bp, int flag);
191static bio_end_io_t lbmIODone; 191static bio_end_io_t lbmIODone;
192static void lbmStartIO(struct lbuf * bp); 192static void lbmStartIO(struct lbuf * bp);
193static void lmGCwrite(struct jfs_log * log, int cant_block); 193static void lmGCwrite(struct jfs_log * log, int cant_block);
194static int lmLogSync(struct jfs_log * log, int nosyncwait); 194static int lmLogSync(struct jfs_log * log, int hard_sync);
195 195
196 196
197 197
@@ -915,19 +915,17 @@ static void lmPostGC(struct lbuf * bp)
915 * if new sync address is available 915 * if new sync address is available
916 * (normally the case if sync() is executed by back-ground 916 * (normally the case if sync() is executed by back-ground
917 * process). 917 * process).
918 * if not, explicitly run jfs_blogsync() to initiate
919 * getting of new sync address.
920 * calculate new value of i_nextsync which determines when 918 * calculate new value of i_nextsync which determines when
921 * this code is called again. 919 * this code is called again.
922 * 920 *
923 * PARAMETERS: log - log structure 921 * PARAMETERS: log - log structure
924 * nosyncwait - 1 if called asynchronously 922 * hard_sync - 1 to force all metadata to be written
925 * 923 *
926 * RETURN: 0 924 * RETURN: 0
927 * 925 *
928 * serialization: LOG_LOCK() held on entry/exit 926 * serialization: LOG_LOCK() held on entry/exit
929 */ 927 */
930static int lmLogSync(struct jfs_log * log, int nosyncwait) 928static int lmLogSync(struct jfs_log * log, int hard_sync)
931{ 929{
932 int logsize; 930 int logsize;
933 int written; /* written since last syncpt */ 931 int written; /* written since last syncpt */
@@ -941,11 +939,18 @@ static int lmLogSync(struct jfs_log * log, int nosyncwait)
941 unsigned long flags; 939 unsigned long flags;
942 940
943 /* push dirty metapages out to disk */ 941 /* push dirty metapages out to disk */
944 list_for_each_entry(sbi, &log->sb_list, log_list) { 942 if (hard_sync)
945 filemap_flush(sbi->ipbmap->i_mapping); 943 list_for_each_entry(sbi, &log->sb_list, log_list) {
946 filemap_flush(sbi->ipimap->i_mapping); 944 filemap_fdatawrite(sbi->ipbmap->i_mapping);
947 filemap_flush(sbi->direct_inode->i_mapping); 945 filemap_fdatawrite(sbi->ipimap->i_mapping);
948 } 946 filemap_fdatawrite(sbi->direct_inode->i_mapping);
947 }
948 else
949 list_for_each_entry(sbi, &log->sb_list, log_list) {
950 filemap_flush(sbi->ipbmap->i_mapping);
951 filemap_flush(sbi->ipimap->i_mapping);
952 filemap_flush(sbi->direct_inode->i_mapping);
953 }
949 954
950 /* 955 /*
951 * forward syncpt 956 * forward syncpt
@@ -1021,16 +1026,13 @@ static int lmLogSync(struct jfs_log * log, int nosyncwait)
1021 /* next syncpt trigger = written + more */ 1026 /* next syncpt trigger = written + more */
1022 log->nextsync = written + more; 1027 log->nextsync = written + more;
1023 1028
1024 /* return if lmLogSync() from outside of transaction, e.g., sync() */
1025 if (nosyncwait)
1026 return lsn;
1027
1028 /* if number of bytes written from last sync point is more 1029 /* if number of bytes written from last sync point is more
1029 * than 1/4 of the log size, stop new transactions from 1030 * than 1/4 of the log size, stop new transactions from
1030 * starting until all current transactions are completed 1031 * starting until all current transactions are completed
1031 * by setting syncbarrier flag. 1032 * by setting syncbarrier flag.
1032 */ 1033 */
1033 if (written > LOGSYNC_BARRIER(logsize) && logsize > 32 * LOGPSIZE) { 1034 if (!test_bit(log_SYNCBARRIER, &log->flag) &&
1035 (written > LOGSYNC_BARRIER(logsize)) && log->active) {
1034 set_bit(log_SYNCBARRIER, &log->flag); 1036 set_bit(log_SYNCBARRIER, &log->flag);
1035 jfs_info("log barrier on: lsn=0x%x syncpt=0x%x", lsn, 1037 jfs_info("log barrier on: lsn=0x%x syncpt=0x%x", lsn,
1036 log->syncpt); 1038 log->syncpt);
@@ -1048,11 +1050,12 @@ static int lmLogSync(struct jfs_log * log, int nosyncwait)
1048 * 1050 *
1049 * FUNCTION: write log SYNCPT record for specified log 1051 * FUNCTION: write log SYNCPT record for specified log
1050 * 1052 *
1051 * PARAMETERS: log - log structure 1053 * PARAMETERS: log - log structure
1054 * hard_sync - set to 1 to force metadata to be written
1052 */ 1055 */
1053void jfs_syncpt(struct jfs_log *log) 1056void jfs_syncpt(struct jfs_log *log, int hard_sync)
1054{ LOG_LOCK(log); 1057{ LOG_LOCK(log);
1055 lmLogSync(log, 1); 1058 lmLogSync(log, hard_sync);
1056 LOG_UNLOCK(log); 1059 LOG_UNLOCK(log);
1057} 1060}
1058 1061
@@ -2359,9 +2362,9 @@ int jfsIOWait(void *arg)
2359 lbmStartIO(bp); 2362 lbmStartIO(bp);
2360 spin_lock_irq(&log_redrive_lock); 2363 spin_lock_irq(&log_redrive_lock);
2361 } 2364 }
2362 if (current->flags & PF_FREEZE) { 2365 if (freezing(current)) {
2363 spin_unlock_irq(&log_redrive_lock); 2366 spin_unlock_irq(&log_redrive_lock);
2364 refrigerator(PF_FREEZE); 2367 refrigerator();
2365 } else { 2368 } else {
2366 add_wait_queue(&jfs_IO_thread_wait, &wq); 2369 add_wait_queue(&jfs_IO_thread_wait, &wq);
2367 set_current_state(TASK_INTERRUPTIBLE); 2370 set_current_state(TASK_INTERRUPTIBLE);
diff --git a/fs/jfs/jfs_logmgr.h b/fs/jfs/jfs_logmgr.h
index 747114cd38..e4978b5b65 100644
--- a/fs/jfs/jfs_logmgr.h
+++ b/fs/jfs/jfs_logmgr.h
@@ -510,6 +510,6 @@ extern int lmLogFormat(struct jfs_log *log, s64 logAddress, int logSize);
510extern int lmGroupCommit(struct jfs_log *, struct tblock *); 510extern int lmGroupCommit(struct jfs_log *, struct tblock *);
511extern int jfsIOWait(void *); 511extern int jfsIOWait(void *);
512extern void jfs_flush_journal(struct jfs_log * log, int wait); 512extern void jfs_flush_journal(struct jfs_log * log, int wait);
513extern void jfs_syncpt(struct jfs_log *log); 513extern void jfs_syncpt(struct jfs_log *log, int hard_sync);
514 514
515#endif /* _H_JFS_LOGMGR */ 515#endif /* _H_JFS_LOGMGR */
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 6c5485d16c..13d7e3f1fe 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -561,7 +561,6 @@ static int metapage_releasepage(struct page *page, int gfp_mask)
561 dump_mem("page", page, sizeof(struct page)); 561 dump_mem("page", page, sizeof(struct page));
562 dump_stack(); 562 dump_stack();
563 } 563 }
564 WARN_ON(mp->lsn);
565 if (mp->lsn) 564 if (mp->lsn)
566 remove_from_logsync(mp); 565 remove_from_logsync(mp);
567 remove_metapage(page, mp); 566 remove_metapage(page, mp);
@@ -641,7 +640,7 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
641 } else { 640 } else {
642 page = read_cache_page(mapping, page_index, 641 page = read_cache_page(mapping, page_index,
643 (filler_t *)mapping->a_ops->readpage, NULL); 642 (filler_t *)mapping->a_ops->readpage, NULL);
644 if (IS_ERR(page)) { 643 if (IS_ERR(page) || !PageUptodate(page)) {
645 jfs_err("read_cache_page failed!"); 644 jfs_err("read_cache_page failed!");
646 return NULL; 645 return NULL;
647 } 646 }
@@ -783,14 +782,6 @@ void release_metapage(struct metapage * mp)
783 if (test_bit(META_discard, &mp->flag) && !mp->count) { 782 if (test_bit(META_discard, &mp->flag) && !mp->count) {
784 clear_page_dirty(page); 783 clear_page_dirty(page);
785 ClearPageUptodate(page); 784 ClearPageUptodate(page);
786#ifdef _NOT_YET
787 if (page->mapping) {
788 /* Remove from page cache and page cache reference */
789 remove_from_page_cache(page);
790 page_cache_release(page);
791 metapage_releasepage(page, 0);
792 }
793#endif
794 } 785 }
795#else 786#else
796 /* Try to keep metapages from using up too much memory */ 787 /* Try to keep metapages from using up too much memory */
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index 8cbaaff1d5..c7a92f9deb 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -552,6 +552,11 @@ void txEnd(tid_t tid)
552 * synchronize with logsync barrier 552 * synchronize with logsync barrier
553 */ 553 */
554 if (test_bit(log_SYNCBARRIER, &log->flag)) { 554 if (test_bit(log_SYNCBARRIER, &log->flag)) {
555 TXN_UNLOCK();
556
557 /* write dirty metadata & forward log syncpt */
558 jfs_syncpt(log, 1);
559
555 jfs_info("log barrier off: 0x%x", log->lsn); 560 jfs_info("log barrier off: 0x%x", log->lsn);
556 561
557 /* enable new transactions start */ 562 /* enable new transactions start */
@@ -560,11 +565,6 @@ void txEnd(tid_t tid)
560 /* wakeup all waitors for logsync barrier */ 565 /* wakeup all waitors for logsync barrier */
561 TXN_WAKEUP(&log->syncwait); 566 TXN_WAKEUP(&log->syncwait);
562 567
563 TXN_UNLOCK();
564
565 /* forward log syncpt */
566 jfs_syncpt(log);
567
568 goto wakeup; 568 goto wakeup;
569 } 569 }
570 } 570 }
@@ -657,7 +657,9 @@ struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp,
657 /* only anonymous txn. 657 /* only anonymous txn.
658 * Remove from anon_list 658 * Remove from anon_list
659 */ 659 */
660 TXN_LOCK();
660 list_del_init(&jfs_ip->anon_inode_list); 661 list_del_init(&jfs_ip->anon_inode_list);
662 TXN_UNLOCK();
661 } 663 }
662 jfs_ip->atlhead = tlck->next; 664 jfs_ip->atlhead = tlck->next;
663 } else { 665 } else {
@@ -2788,9 +2790,9 @@ int jfs_lazycommit(void *arg)
2788 /* In case a wakeup came while all threads were active */ 2790 /* In case a wakeup came while all threads were active */
2789 jfs_commit_thread_waking = 0; 2791 jfs_commit_thread_waking = 0;
2790 2792
2791 if (current->flags & PF_FREEZE) { 2793 if (freezing(current)) {
2792 LAZY_UNLOCK(flags); 2794 LAZY_UNLOCK(flags);
2793 refrigerator(PF_FREEZE); 2795 refrigerator();
2794 } else { 2796 } else {
2795 DECLARE_WAITQUEUE(wq, current); 2797 DECLARE_WAITQUEUE(wq, current);
2796 2798
@@ -2987,9 +2989,9 @@ int jfs_sync(void *arg)
2987 /* Add anon_list2 back to anon_list */ 2989 /* Add anon_list2 back to anon_list */
2988 list_splice_init(&TxAnchor.anon_list2, &TxAnchor.anon_list); 2990 list_splice_init(&TxAnchor.anon_list2, &TxAnchor.anon_list);
2989 2991
2990 if (current->flags & PF_FREEZE) { 2992 if (freezing(current)) {
2991 TXN_UNLOCK(); 2993 TXN_UNLOCK();
2992 refrigerator(PF_FREEZE); 2994 refrigerator();
2993 } else { 2995 } else {
2994 DECLARE_WAITQUEUE(wq, current); 2996 DECLARE_WAITQUEUE(wq, current);
2995 2997
diff --git a/fs/jfs/jfs_unicode.c b/fs/jfs/jfs_unicode.c
index b32208aad5..f327decfb1 100644
--- a/fs/jfs/jfs_unicode.c
+++ b/fs/jfs/jfs_unicode.c
@@ -51,8 +51,9 @@ int jfs_strfromUCS_le(char *to, const __le16 * from,
51 } 51 }
52 } else { 52 } else {
53 for (i = 0; (i < len) && from[i]; i++) { 53 for (i = 0; (i < len) && from[i]; i++) {
54 if (le16_to_cpu(from[i]) & 0xff00) { 54 if (unlikely(le16_to_cpu(from[i]) & 0xff00)) {
55 if (warn) { 55 to[i] = '?';
56 if (unlikely(warn)) {
56 warn--; 57 warn--;
57 warn_again--; 58 warn_again--;
58 printk(KERN_ERR 59 printk(KERN_ERR
@@ -61,7 +62,7 @@ int jfs_strfromUCS_le(char *to, const __le16 * from,
61 printk(KERN_ERR 62 printk(KERN_ERR
62 "mount with iocharset=utf8 to access\n"); 63 "mount with iocharset=utf8 to access\n");
63 } 64 }
64 to[i] = '?'; 65
65 } 66 }
66 else 67 else
67 to[i] = (char) (le16_to_cpu(from[i])); 68 to[i] = (char) (le16_to_cpu(from[i]));
diff --git a/fs/jfs/jfs_xtree.c b/fs/jfs/jfs_xtree.c
index 31b34db451..a7fe2f2b96 100644
--- a/fs/jfs/jfs_xtree.c
+++ b/fs/jfs/jfs_xtree.c
@@ -135,14 +135,6 @@ static int xtSearchNode(struct inode *ip,
135static int xtRelink(tid_t tid, struct inode *ip, xtpage_t * fp); 135static int xtRelink(tid_t tid, struct inode *ip, xtpage_t * fp);
136#endif /* _STILL_TO_PORT */ 136#endif /* _STILL_TO_PORT */
137 137
138/* External references */
139
140/*
141 * debug control
142 */
143/* #define _JFS_DEBUG_XTREE 1 */
144
145
146/* 138/*
147 * xtLookup() 139 * xtLookup()
148 * 140 *
@@ -4140,338 +4132,6 @@ s64 xtTruncate_pmap(tid_t tid, struct inode *ip, s64 committed_size)
4140 return 0; 4132 return 0;
4141} 4133}
4142 4134
4143
4144#ifdef _JFS_DEBUG_XTREE
4145/*
4146 * xtDisplayTree()
4147 *
4148 * function: traverse forward
4149 */
4150int xtDisplayTree(struct inode *ip)
4151{
4152 int rc = 0;
4153 struct metapage *mp;
4154 xtpage_t *p;
4155 s64 bn, pbn;
4156 int index, lastindex, v, h;
4157 xad_t *xad;
4158 struct btstack btstack;
4159 struct btframe *btsp;
4160 struct btframe *parent;
4161
4162 printk("display B+-tree.\n");
4163
4164 /* clear stack */
4165 btsp = btstack.stack;
4166
4167 /*
4168 * start with root
4169 *
4170 * root resides in the inode
4171 */
4172 bn = 0;
4173 v = h = 0;
4174
4175 /*
4176 * first access of each page:
4177 */
4178 getPage:
4179 XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
4180 if (rc)
4181 return rc;
4182
4183 /* process entries forward from first index */
4184 index = XTENTRYSTART;
4185 lastindex = le16_to_cpu(p->header.nextindex) - 1;
4186
4187 if (p->header.flag & BT_INTERNAL) {
4188 /*
4189 * first access of each internal page
4190 */
4191 goto getChild;
4192 } else { /* (p->header.flag & BT_LEAF) */
4193
4194 /*
4195 * first access of each leaf page
4196 */
4197 printf("leaf page ");
4198 xtDisplayPage(ip, bn, p);
4199
4200 /* unpin the leaf page */
4201 XT_PUTPAGE(mp);
4202 }
4203
4204 /*
4205 * go back up to the parent page
4206 */
4207 getParent:
4208 /* pop/restore parent entry for the current child page */
4209 if ((parent = (btsp == btstack.stack ? NULL : --btsp)) == NULL)
4210 /* current page must have been root */
4211 return;
4212
4213 /*
4214 * parent page scan completed
4215 */
4216 if ((index = parent->index) == (lastindex = parent->lastindex)) {
4217 /* go back up to the parent page */
4218 goto getParent;
4219 }
4220
4221 /*
4222 * parent page has entries remaining
4223 */
4224 /* get back the parent page */
4225 bn = parent->bn;
4226 /* v = parent->level; */
4227 XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
4228 if (rc)
4229 return rc;
4230
4231 /* get next parent entry */
4232 index++;
4233
4234 /*
4235 * internal page: go down to child page of current entry
4236 */
4237 getChild:
4238 /* push/save current parent entry for the child page */
4239 btsp->bn = pbn = bn;
4240 btsp->index = index;
4241 btsp->lastindex = lastindex;
4242 /* btsp->level = v; */
4243 /* btsp->node = h; */
4244 ++btsp;
4245
4246 /* get child page */
4247 xad = &p->xad[index];
4248 bn = addressXAD(xad);
4249
4250 /*
4251 * first access of each internal entry:
4252 */
4253 /* release parent page */
4254 XT_PUTPAGE(mp);
4255
4256 printk("traverse down 0x%lx[%d]->0x%lx\n", (ulong) pbn, index,
4257 (ulong) bn);
4258 v++;
4259 h = index;
4260
4261 /* process the child page */
4262 goto getPage;
4263}
4264
4265
4266/*
4267 * xtDisplayPage()
4268 *
4269 * function: display page
4270 */
4271int xtDisplayPage(struct inode *ip, s64 bn, xtpage_t * p)
4272{
4273 int rc = 0;
4274 xad_t *xad;
4275 s64 xaddr, xoff;
4276 int xlen, i, j;
4277
4278 /* display page control */
4279 printf("bn:0x%lx flag:0x%x nextindex:%d\n",
4280 (ulong) bn, p->header.flag,
4281 le16_to_cpu(p->header.nextindex));
4282
4283 /* display entries */
4284 xad = &p->xad[XTENTRYSTART];
4285 for (i = XTENTRYSTART, j = 1; i < le16_to_cpu(p->header.nextindex);
4286 i++, xad++, j++) {
4287 xoff = offsetXAD(xad);
4288 xaddr = addressXAD(xad);
4289 xlen = lengthXAD(xad);
4290 printf("\t[%d] 0x%lx:0x%lx(0x%x)", i, (ulong) xoff,
4291 (ulong) xaddr, xlen);
4292
4293 if (j == 4) {
4294 printf("\n");
4295 j = 0;
4296 }
4297 }
4298
4299 printf("\n");
4300}
4301#endif /* _JFS_DEBUG_XTREE */
4302
4303
4304#ifdef _JFS_WIP
4305/*
4306 * xtGather()
4307 *
4308 * function:
4309 * traverse for allocation acquiring tlock at commit time
4310 * (vs at the time of update) logging backward top down
4311 *
4312 * note:
4313 * problem - establishing that all new allocation have been
4314 * processed both for append and random write in sparse file
4315 * at the current entry at the current subtree root page
4316 *
4317 */
4318int xtGather(btree_t *t)
4319{
4320 int rc = 0;
4321 xtpage_t *p;
4322 u64 bn;
4323 int index;
4324 btentry_t *e;
4325 struct btstack btstack;
4326 struct btsf *parent;
4327
4328 /* clear stack */
4329 BT_CLR(&btstack);
4330
4331 /*
4332 * start with root
4333 *
4334 * root resides in the inode
4335 */
4336 bn = 0;
4337 XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
4338 if (rc)
4339 return rc;
4340
4341 /* new root is NOT pointed by a new entry
4342 if (p->header.flag & NEW)
4343 allocate new page lock;
4344 write a NEWPAGE log;
4345 */
4346
4347 dopage:
4348 /*
4349 * first access of each page:
4350 */
4351 /* process entries backward from last index */
4352 index = le16_to_cpu(p->header.nextindex) - 1;
4353
4354 if (p->header.flag & BT_LEAF) {
4355 /*
4356 * first access of each leaf page
4357 */
4358 /* process leaf page entries backward */
4359 for (; index >= XTENTRYSTART; index--) {
4360 e = &p->xad[index];
4361 /*
4362 * if newpage, log NEWPAGE.
4363 *
4364 if (e->flag & XAD_NEW) {
4365 nfound =+ entry->length;
4366 update current page lock for the entry;
4367 newpage(entry);
4368 *
4369 * if moved, log move.
4370 *
4371 } else if (e->flag & XAD_MOVED) {
4372 reset flag;
4373 update current page lock for the entry;
4374 }
4375 */
4376 }
4377
4378 /* unpin the leaf page */
4379 XT_PUTPAGE(mp);
4380
4381 /*
4382 * go back up to the parent page
4383 */
4384 getParent:
4385 /* restore parent entry for the current child page */
4386 if ((parent = BT_POP(&btstack)) == NULL)
4387 /* current page must have been root */
4388 return 0;
4389
4390 if ((index = parent->index) == XTENTRYSTART) {
4391 /*
4392 * parent page scan completed
4393 */
4394 /* go back up to the parent page */
4395 goto getParent;
4396 } else {
4397 /*
4398 * parent page has entries remaining
4399 */
4400 /* get back the parent page */
4401 bn = parent->bn;
4402 XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
4403 if (rc)
4404 return -EIO;
4405
4406 /* first subroot page which
4407 * covers all new allocated blocks
4408 * itself not new/modified.
4409 * (if modified from split of descendent,
4410 * go down path of split page)
4411
4412 if (nfound == nnew &&
4413 !(p->header.flag & (NEW | MOD)))
4414 exit scan;
4415 */
4416
4417 /* process parent page entries backward */
4418 index--;
4419 }
4420 } else {
4421 /*
4422 * first access of each internal page
4423 */
4424 }
4425
4426 /*
4427 * internal page: go down to child page of current entry
4428 */
4429
4430 /* save current parent entry for the child page */
4431 BT_PUSH(&btstack, bn, index);
4432
4433 /* get current entry for the child page */
4434 e = &p->xad[index];
4435
4436 /*
4437 * first access of each internal entry:
4438 */
4439 /*
4440 * if new entry, log btree_tnewentry.
4441 *
4442 if (e->flag & XAD_NEW)
4443 update parent page lock for the entry;
4444 */
4445
4446 /* release parent page */
4447 XT_PUTPAGE(mp);
4448
4449 /* get child page */
4450 bn = e->bn;
4451 XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
4452 if (rc)
4453 return rc;
4454
4455 /*
4456 * first access of each non-root page:
4457 */
4458 /*
4459 * if new, log btree_newpage.
4460 *
4461 if (p->header.flag & NEW)
4462 allocate new page lock;
4463 write a NEWPAGE log (next, prev);
4464 */
4465
4466 /* process the child page */
4467 goto dopage;
4468
4469 out:
4470 return 0;
4471}
4472#endif /* _JFS_WIP */
4473
4474
4475#ifdef CONFIG_JFS_STATISTICS 4135#ifdef CONFIG_JFS_STATISTICS
4476int jfs_xtstat_read(char *buffer, char **start, off_t offset, int length, 4136int jfs_xtstat_read(char *buffer, char **start, off_t offset, int length,
4477 int *eof, void *data) 4137 int *eof, void *data)
diff --git a/fs/jfs/jfs_xtree.h b/fs/jfs/jfs_xtree.h
index a69784254f..af668a80b4 100644
--- a/fs/jfs/jfs_xtree.h
+++ b/fs/jfs/jfs_xtree.h
@@ -131,10 +131,4 @@ extern int xtRelocate(tid_t tid, struct inode *ip,
131extern int xtAppend(tid_t tid, 131extern int xtAppend(tid_t tid,
132 struct inode *ip, int xflag, s64 xoff, int maxblocks, 132 struct inode *ip, int xflag, s64 xoff, int maxblocks,
133 int *xlenp, s64 * xaddrp, int flag); 133 int *xlenp, s64 * xaddrp, int flag);
134
135#ifdef _JFS_DEBUG_XTREE
136extern int xtDisplayTree(struct inode *ip);
137extern int xtDisplayPage(struct inode *ip, s64 bn, xtpage_t * p);
138#endif /* _JFS_DEBUG_XTREE */
139
140#endif /* !_H_JFS_XTREE */ 134#endif /* !_H_JFS_XTREE */
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 810a3653d8..9ff89720f9 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -24,6 +24,7 @@
24#include <linux/completion.h> 24#include <linux/completion.h>
25#include <linux/vfs.h> 25#include <linux/vfs.h>
26#include <linux/moduleparam.h> 26#include <linux/moduleparam.h>
27#include <linux/posix_acl.h>
27#include <asm/uaccess.h> 28#include <asm/uaccess.h>
28 29
29#include "jfs_incore.h" 30#include "jfs_incore.h"
@@ -113,6 +114,8 @@ static void jfs_destroy_inode(struct inode *inode)
113{ 114{
114 struct jfs_inode_info *ji = JFS_IP(inode); 115 struct jfs_inode_info *ji = JFS_IP(inode);
115 116
117 BUG_ON(!list_empty(&ji->anon_inode_list));
118
116 spin_lock_irq(&ji->ag_lock); 119 spin_lock_irq(&ji->ag_lock);
117 if (ji->active_ag != -1) { 120 if (ji->active_ag != -1) {
118 struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap; 121 struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
@@ -530,7 +533,7 @@ static int jfs_sync_fs(struct super_block *sb, int wait)
530 /* log == NULL indicates read-only mount */ 533 /* log == NULL indicates read-only mount */
531 if (log) { 534 if (log) {
532 jfs_flush_journal(log, wait); 535 jfs_flush_journal(log, wait);
533 jfs_syncpt(log); 536 jfs_syncpt(log, 0);
534 } 537 }
535 538
536 return 0; 539 return 0;
diff --git a/fs/jfs/symlink.c b/fs/jfs/symlink.c
index 287d8d6c3c..16477b3835 100644
--- a/fs/jfs/symlink.c
+++ b/fs/jfs/symlink.c
@@ -22,11 +22,11 @@
22#include "jfs_inode.h" 22#include "jfs_inode.h"
23#include "jfs_xattr.h" 23#include "jfs_xattr.h"
24 24
25static int jfs_follow_link(struct dentry *dentry, struct nameidata *nd) 25static void *jfs_follow_link(struct dentry *dentry, struct nameidata *nd)
26{ 26{
27 char *s = JFS_IP(dentry->d_inode)->i_inline; 27 char *s = JFS_IP(dentry->d_inode)->i_inline;
28 nd_set_link(nd, s); 28 nd_set_link(nd, s);
29 return 0; 29 return NULL;
30} 30}
31 31
32struct inode_operations jfs_symlink_inode_operations = { 32struct inode_operations jfs_symlink_inode_operations = {
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 6016373701..554ec739e4 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -19,6 +19,7 @@
19 19
20#include <linux/fs.h> 20#include <linux/fs.h>
21#include <linux/xattr.h> 21#include <linux/xattr.h>
22#include <linux/posix_acl_xattr.h>
22#include <linux/quotaops.h> 23#include <linux/quotaops.h>
23#include "jfs_incore.h" 24#include "jfs_incore.h"
24#include "jfs_superblock.h" 25#include "jfs_superblock.h"
@@ -718,9 +719,9 @@ static int can_set_system_xattr(struct inode *inode, const char *name,
718 return -EPERM; 719 return -EPERM;
719 720
720 /* 721 /*
721 * XATTR_NAME_ACL_ACCESS is tied to i_mode 722 * POSIX_ACL_XATTR_ACCESS is tied to i_mode
722 */ 723 */
723 if (strcmp(name, XATTR_NAME_ACL_ACCESS) == 0) { 724 if (strcmp(name, POSIX_ACL_XATTR_ACCESS) == 0) {
724 acl = posix_acl_from_xattr(value, value_len); 725 acl = posix_acl_from_xattr(value, value_len);
725 if (IS_ERR(acl)) { 726 if (IS_ERR(acl)) {
726 rc = PTR_ERR(acl); 727 rc = PTR_ERR(acl);
@@ -750,7 +751,7 @@ static int can_set_system_xattr(struct inode *inode, const char *name,
750 JFS_IP(inode)->i_acl = JFS_ACL_NOT_CACHED; 751 JFS_IP(inode)->i_acl = JFS_ACL_NOT_CACHED;
751 752
752 return 0; 753 return 0;
753 } else if (strcmp(name, XATTR_NAME_ACL_DEFAULT) == 0) { 754 } else if (strcmp(name, POSIX_ACL_XATTR_DEFAULT) == 0) {
754 acl = posix_acl_from_xattr(value, value_len); 755 acl = posix_acl_from_xattr(value, value_len);
755 if (IS_ERR(acl)) { 756 if (IS_ERR(acl)) {
756 rc = PTR_ERR(acl); 757 rc = PTR_ERR(acl);
@@ -780,7 +781,7 @@ static int can_set_xattr(struct inode *inode, const char *name,
780 if (IS_RDONLY(inode)) 781 if (IS_RDONLY(inode))
781 return -EROFS; 782 return -EROFS;
782 783
783 if (IS_IMMUTABLE(inode) || IS_APPEND(inode) || S_ISLNK(inode->i_mode)) 784 if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
784 return -EPERM; 785 return -EPERM;
785 786
786 if(strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) == 0) 787 if(strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) == 0)
@@ -789,12 +790,12 @@ static int can_set_xattr(struct inode *inode, const char *name,
789 */ 790 */
790 return can_set_system_xattr(inode, name, value, value_len); 791 return can_set_system_xattr(inode, name, value, value_len);
791 792
792 if(strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0) 793 if(strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0)
793 return (capable(CAP_SYS_ADMIN) ? 0 : -EPERM); 794 return (capable(CAP_SYS_ADMIN) ? 0 : -EPERM);
794 795
795#ifdef CONFIG_JFS_SECURITY 796#ifdef CONFIG_JFS_SECURITY
796 if (strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) 797 if (strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)
797 != 0) 798 == 0)
798 return 0; /* Leave it to the security module */ 799 return 0; /* Leave it to the security module */
799#endif 800#endif
800 801