aboutsummaryrefslogtreecommitdiffstats
path: root/fs/jfs/jfs_logmgr.c
diff options
context:
space:
mode:
authorDave Kleikamp <shaggy@austin.ibm.com>2005-05-02 14:25:02 -0400
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-05-03 01:23:53 -0400
commit7fab479bebb96b1b4888bdae9b42e1fa9c5d3f38 (patch)
tree3d47de90cf39002e576df02f474bc17342ff0f4a /fs/jfs/jfs_logmgr.c
parentdc5798d9a7b656550533a5c0177dba17d4ef4990 (diff)
[PATCH] JFS: Support page sizes greater than 4K
jfs has never worked on architectures where the page size was not 4K. Signed-off-by: Dave Kleikamp <shaggy@austin.ibm.com> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'fs/jfs/jfs_logmgr.c')
-rw-r--r--fs/jfs/jfs_logmgr.c71
1 files changed, 40 insertions, 31 deletions
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index e0f867ddfd10..cfcdad3459dd 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -234,6 +234,7 @@ int lmLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
234 int lsn; 234 int lsn;
235 int diffp, difft; 235 int diffp, difft;
236 struct metapage *mp = NULL; 236 struct metapage *mp = NULL;
237 unsigned long flags;
237 238
238 jfs_info("lmLog: log:0x%p tblk:0x%p, lrd:0x%p tlck:0x%p", 239 jfs_info("lmLog: log:0x%p tblk:0x%p, lrd:0x%p tlck:0x%p",
239 log, tblk, lrd, tlck); 240 log, tblk, lrd, tlck);
@@ -254,7 +255,7 @@ int lmLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
254 */ 255 */
255 lsn = log->lsn; 256 lsn = log->lsn;
256 257
257 LOGSYNC_LOCK(log); 258 LOGSYNC_LOCK(log, flags);
258 259
259 /* 260 /*
260 * initialize page lsn if first log write of the page 261 * initialize page lsn if first log write of the page
@@ -310,7 +311,7 @@ int lmLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
310 } 311 }
311 } 312 }
312 313
313 LOGSYNC_UNLOCK(log); 314 LOGSYNC_UNLOCK(log, flags);
314 315
315 /* 316 /*
316 * write the log record 317 * write the log record
@@ -334,7 +335,6 @@ int lmLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
334 return lsn; 335 return lsn;
335} 336}
336 337
337
338/* 338/*
339 * NAME: lmWriteRecord() 339 * NAME: lmWriteRecord()
340 * 340 *
@@ -945,6 +945,15 @@ static int lmLogSync(struct jfs_log * log, int nosyncwait)
945 struct lrd lrd; 945 struct lrd lrd;
946 int lsn; 946 int lsn;
947 struct logsyncblk *lp; 947 struct logsyncblk *lp;
948 struct jfs_sb_info *sbi;
949 unsigned long flags;
950
951 /* push dirty metapages out to disk */
952 list_for_each_entry(sbi, &log->sb_list, log_list) {
953 filemap_flush(sbi->ipbmap->i_mapping);
954 filemap_flush(sbi->ipimap->i_mapping);
955 filemap_flush(sbi->direct_inode->i_mapping);
956 }
948 957
949 /* 958 /*
950 * forward syncpt 959 * forward syncpt
@@ -954,10 +963,7 @@ static int lmLogSync(struct jfs_log * log, int nosyncwait)
954 */ 963 */
955 964
956 if (log->sync == log->syncpt) { 965 if (log->sync == log->syncpt) {
957 LOGSYNC_LOCK(log); 966 LOGSYNC_LOCK(log, flags);
958 /* ToDo: push dirty metapages out to disk */
959// bmLogSync(log);
960
961 if (list_empty(&log->synclist)) 967 if (list_empty(&log->synclist))
962 log->sync = log->lsn; 968 log->sync = log->lsn;
963 else { 969 else {
@@ -965,7 +971,7 @@ static int lmLogSync(struct jfs_log * log, int nosyncwait)
965 struct logsyncblk, synclist); 971 struct logsyncblk, synclist);
966 log->sync = lp->lsn; 972 log->sync = lp->lsn;
967 } 973 }
968 LOGSYNC_UNLOCK(log); 974 LOGSYNC_UNLOCK(log, flags);
969 975
970 } 976 }
971 977
@@ -974,27 +980,6 @@ static int lmLogSync(struct jfs_log * log, int nosyncwait)
974 * reset syncpt = sync 980 * reset syncpt = sync
975 */ 981 */
976 if (log->sync != log->syncpt) { 982 if (log->sync != log->syncpt) {
977 struct jfs_sb_info *sbi;
978
979 /*
980 * We need to make sure all of the "written" metapages
981 * actually make it to disk
982 */
983 list_for_each_entry(sbi, &log->sb_list, log_list) {
984 if (sbi->flag & JFS_NOINTEGRITY)
985 continue;
986 filemap_fdatawrite(sbi->ipbmap->i_mapping);
987 filemap_fdatawrite(sbi->ipimap->i_mapping);
988 filemap_fdatawrite(sbi->sb->s_bdev->bd_inode->i_mapping);
989 }
990 list_for_each_entry(sbi, &log->sb_list, log_list) {
991 if (sbi->flag & JFS_NOINTEGRITY)
992 continue;
993 filemap_fdatawait(sbi->ipbmap->i_mapping);
994 filemap_fdatawait(sbi->ipimap->i_mapping);
995 filemap_fdatawait(sbi->sb->s_bdev->bd_inode->i_mapping);
996 }
997
998 lrd.logtid = 0; 983 lrd.logtid = 0;
999 lrd.backchain = 0; 984 lrd.backchain = 0;
1000 lrd.type = cpu_to_le16(LOG_SYNCPT); 985 lrd.type = cpu_to_le16(LOG_SYNCPT);
@@ -1547,6 +1532,7 @@ void jfs_flush_journal(struct jfs_log *log, int wait)
1547{ 1532{
1548 int i; 1533 int i;
1549 struct tblock *target = NULL; 1534 struct tblock *target = NULL;
1535 struct jfs_sb_info *sbi;
1550 1536
1551 /* jfs_write_inode may call us during read-only mount */ 1537 /* jfs_write_inode may call us during read-only mount */
1552 if (!log) 1538 if (!log)
@@ -1608,12 +1594,18 @@ void jfs_flush_journal(struct jfs_log *log, int wait)
1608 if (wait < 2) 1594 if (wait < 2)
1609 return; 1595 return;
1610 1596
1597 list_for_each_entry(sbi, &log->sb_list, log_list) {
1598 filemap_fdatawrite(sbi->ipbmap->i_mapping);
1599 filemap_fdatawrite(sbi->ipimap->i_mapping);
1600 filemap_fdatawrite(sbi->direct_inode->i_mapping);
1601 }
1602
1611 /* 1603 /*
1612 * If there was recent activity, we may need to wait 1604 * If there was recent activity, we may need to wait
1613 * for the lazycommit thread to catch up 1605 * for the lazycommit thread to catch up
1614 */ 1606 */
1615 if ((!list_empty(&log->cqueue)) || !list_empty(&log->synclist)) { 1607 if ((!list_empty(&log->cqueue)) || !list_empty(&log->synclist)) {
1616 for (i = 0; i < 800; i++) { /* Too much? */ 1608 for (i = 0; i < 200; i++) { /* Too much? */
1617 msleep(250); 1609 msleep(250);
1618 if (list_empty(&log->cqueue) && 1610 if (list_empty(&log->cqueue) &&
1619 list_empty(&log->synclist)) 1611 list_empty(&log->synclist))
@@ -1621,7 +1613,24 @@ void jfs_flush_journal(struct jfs_log *log, int wait)
1621 } 1613 }
1622 } 1614 }
1623 assert(list_empty(&log->cqueue)); 1615 assert(list_empty(&log->cqueue));
1624 assert(list_empty(&log->synclist)); 1616 if (!list_empty(&log->synclist)) {
1617 struct logsyncblk *lp;
1618
1619 list_for_each_entry(lp, &log->synclist, synclist) {
1620 if (lp->xflag & COMMIT_PAGE) {
1621 struct metapage *mp = (struct metapage *)lp;
1622 dump_mem("orphan metapage", lp,
1623 sizeof(struct metapage));
1624 dump_mem("page", mp->page, sizeof(struct page));
1625 }
1626 else
1627 dump_mem("orphan tblock", lp,
1628 sizeof(struct tblock));
1629 }
1630// current->state = TASK_INTERRUPTIBLE;
1631// schedule();
1632 }
1633 //assert(list_empty(&log->synclist));
1625 clear_bit(log_FLUSH, &log->flag); 1634 clear_bit(log_FLUSH, &log->flag);
1626} 1635}
1627 1636