Diffstat (limited to 'fs/nilfs2/page.c')
-rw-r--r--	fs/nilfs2/page.c	200
1 file changed, 103 insertions(+), 97 deletions(-)
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index aab11db2cb08..65221a04c6f0 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -37,8 +37,7 @@
 
 #define NILFS_BUFFER_INHERENT_BITS  \
 	((1UL << BH_Uptodate) | (1UL << BH_Mapped) | (1UL << BH_NILFS_Node) | \
-	 (1UL << BH_NILFS_Volatile) | (1UL << BH_NILFS_Allocated) | \
-	 (1UL << BH_NILFS_Checked))
+	 (1UL << BH_NILFS_Volatile) | (1UL << BH_NILFS_Checked))
 
 static struct buffer_head *
 __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
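
Dropping BH_NILFS_Allocated from the inherent-bit mask pairs with the removal of the private-page allocator further down in this diff, which was the only code that set the bit. The mask names the buffer states carried over when buffer contents are copied between caches; a hedged sketch of the presumed use, modeled on the surrounding nilfs_copy_page() logic, which is not shown in this diff:

/* Sketch (assumption, not part of the patch): how the mask is presumably
 * applied when buffers are duplicated between caches, e.g. inside
 * nilfs_copy_page(). */
static void sketch_copy_buffer_state(struct buffer_head *dbh,
				     struct buffer_head *sbh)
{
	dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
}
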
@@ -59,19 +58,6 @@ __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
 	return bh;
 }
 
-/*
- * Since the page cache of B-tree node pages or data page cache of pseudo
- * inodes does not have a valid mapping->host pointer, calling
- * mark_buffer_dirty() for their buffers causes a NULL pointer dereference;
- * it calls __mark_inode_dirty(NULL) through __set_page_dirty().
- * To avoid this problem, the old style mark_buffer_dirty() is used instead.
- */
-void nilfs_mark_buffer_dirty(struct buffer_head *bh)
-{
-	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
-		__set_page_dirty_nobuffers(bh->b_page);
-}
-
 struct buffer_head *nilfs_grab_buffer(struct inode *inode,
 				      struct address_space *mapping,
 				      unsigned long blkoff,
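
The comment deleted above records why the wrapper existed: buffers in caches whose mapping->host is NULL (B-tree node caches and the data caches of pseudo inodes) must not take the generic dirtying path, which hands that NULL inode to __mark_inode_dirty(). This diff alone does not show where the helper went; presumably it moved or was superseded elsewhere in the series. For reference, the failure mode and the workaround, abbreviated (illustration only, reproducing the removed code):

/*
 * Generic path the wrapper avoided (call chain simplified):
 *
 *	mark_buffer_dirty(bh)
 *	  -> __set_page_dirty(page, page_mapping(page), ...)
 *	    -> __mark_inode_dirty(mapping->host, I_DIRTY_PAGES)
 *	       (NULL dereference when mapping->host is unset)
 *
 * The wrapper dirtied the page without touching the inode:
 */
void nilfs_mark_buffer_dirty(struct buffer_head *bh)
{
	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
		__set_page_dirty_nobuffers(bh->b_page);
}
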
@@ -79,8 +65,8 @@ struct buffer_head *nilfs_grab_buffer(struct inode *inode,
 {
 	int blkbits = inode->i_blkbits;
 	pgoff_t index = blkoff >> (PAGE_CACHE_SHIFT - blkbits);
-	struct page *page, *opage;
-	struct buffer_head *bh, *obh;
+	struct page *page;
+	struct buffer_head *bh;
 
 	page = grab_cache_page(mapping, index);
 	if (unlikely(!page))
@@ -92,30 +78,6 @@ struct buffer_head *nilfs_grab_buffer(struct inode *inode,
 		page_cache_release(page);
 		return NULL;
 	}
-	if (!buffer_uptodate(bh) && mapping->assoc_mapping != NULL) {
-		/*
-		 * Shadow page cache uses assoc_mapping to point its original
-		 * page cache.  The following code tries the original cache
-		 * if the given cache is a shadow and it didn't hit.
-		 */
-		opage = find_lock_page(mapping->assoc_mapping, index);
-		if (!opage)
-			return bh;
-
-		obh = __nilfs_get_page_block(opage, blkoff, index, blkbits,
-					     b_state);
-		if (buffer_uptodate(obh)) {
-			nilfs_copy_buffer(bh, obh);
-			if (buffer_dirty(obh)) {
-				nilfs_mark_buffer_dirty(bh);
-				if (!buffer_nilfs_node(bh) && NILFS_MDT(inode))
-					nilfs_mdt_mark_dirty(inode);
-			}
-		}
-		brelse(obh);
-		unlock_page(opage);
-		page_cache_release(opage);
-	}
 	return bh;
 }
 
@@ -131,6 +93,7 @@ void nilfs_forget_buffer(struct buffer_head *bh)
 	lock_buffer(bh);
 	clear_buffer_nilfs_volatile(bh);
 	clear_buffer_nilfs_checked(bh);
+	clear_buffer_nilfs_redirected(bh);
 	clear_buffer_dirty(bh);
 	if (nilfs_page_buffers_clean(page))
 		__nilfs_clear_page_dirty(page);
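
clear_buffer_nilfs_redirected() implies a new NILFS-private buffer state bit plus its BUFFER_FNS() accessors, which would be declared in fs/nilfs2/page.h rather than in this file. A hedged sketch of that declaration, by analogy with the existing flags (the enum layout and bit position are assumptions; this diff does not show them):

/* Hypothetical fs/nilfs2/page.h excerpt -- layout assumed: */
enum {
	BH_NILFS_Node = BH_PrivateStart,
	BH_NILFS_Volatile,
	BH_NILFS_Checked,
	BH_NILFS_Redirected,	/* new state bit; semantics not shown here */
};

/* Generates buffer_nilfs_redirected(), set_buffer_nilfs_redirected()
 * and clear_buffer_nilfs_redirected(). */
BUFFER_FNS(NILFS_Redirected, nilfs_redirected)
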
@@ -206,7 +169,7 @@ int nilfs_page_buffers_clean(struct page *page)
 void nilfs_page_bug(struct page *page)
 {
 	struct address_space *m;
-	unsigned long ino = 0;
+	unsigned long ino;
 
 	if (unlikely(!page)) {
 		printk(KERN_CRIT "NILFS_PAGE_BUG(NULL)\n");
@@ -214,11 +177,8 @@ void nilfs_page_bug(struct page *page)
 	}
 
 	m = page->mapping;
-	if (m) {
-		struct inode *inode = NILFS_AS_I(m);
-		if (inode != NULL)
-			ino = inode->i_ino;
-	}
+	ino = m ? m->host->i_ino : 0;
+
 	printk(KERN_CRIT "NILFS_PAGE_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
 	       "mapping=%p ino=%lu\n",
 	       page, atomic_read(&page->_count),
@@ -240,56 +200,6 @@ void nilfs_page_bug(struct page *page)
 }
 
 /**
- * nilfs_alloc_private_page - allocate a private page with buffer heads
- *
- * Return Value: On success, a pointer to the allocated page is returned.
- * On error, NULL is returned.
- */
-struct page *nilfs_alloc_private_page(struct block_device *bdev, int size,
-				      unsigned long state)
-{
-	struct buffer_head *bh, *head, *tail;
-	struct page *page;
-
-	page = alloc_page(GFP_NOFS); /* page_count of the returned page is 1 */
-	if (unlikely(!page))
-		return NULL;
-
-	lock_page(page);
-	head = alloc_page_buffers(page, size, 0);
-	if (unlikely(!head)) {
-		unlock_page(page);
-		__free_page(page);
-		return NULL;
-	}
-
-	bh = head;
-	do {
-		bh->b_state = (1UL << BH_NILFS_Allocated) | state;
-		tail = bh;
-		bh->b_bdev = bdev;
-		bh = bh->b_this_page;
-	} while (bh);
-
-	tail->b_this_page = head;
-	attach_page_buffers(page, head);
-
-	return page;
-}
-
-void nilfs_free_private_page(struct page *page)
-{
-	BUG_ON(!PageLocked(page));
-	BUG_ON(page->mapping);
-
-	if (page_has_buffers(page) && !try_to_free_buffers(page))
-		NILFS_PAGE_BUG(page, "failed to free page");
-
-	unlock_page(page);
-	__free_page(page);
-}
-
-/**
  * nilfs_copy_page -- copy the page with buffers
  * @dst: destination page
  * @src: source page
@@ -483,6 +393,7 @@ void nilfs_clear_dirty_pages(struct address_space *mapping)
 			clear_buffer_dirty(bh);
 			clear_buffer_nilfs_volatile(bh);
 			clear_buffer_nilfs_checked(bh);
+			clear_buffer_nilfs_redirected(bh);
 			clear_buffer_uptodate(bh);
 			clear_buffer_mapped(bh);
 			unlock_buffer(bh);
@@ -514,6 +425,17 @@ unsigned nilfs_page_count_clean_buffers(struct page *page,
 	return nc;
 }
 
+void nilfs_mapping_init(struct address_space *mapping, struct inode *inode,
+			struct backing_dev_info *bdi)
+{
+	mapping->host = inode;
+	mapping->flags = 0;
+	mapping_set_gfp_mask(mapping, GFP_NOFS);
+	mapping->assoc_mapping = NULL;
+	mapping->backing_dev_info = bdi;
+	mapping->a_ops = &empty_aops;
+}
+
 /*
  * NILFS2 needs clear_page_dirty() in the following two cases:
  *
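
The new helper gathers the boilerplate for the standalone address_space objects nilfs2 uses as internal caches. Two details tie it to the rest of this diff: mapping->host is always set to a real inode, which is what lets nilfs_page_bug() above read m->host->i_ino unconditionally, and assoc_mapping is cleared, matching the shadow-cache fallback deleted from nilfs_grab_buffer(). A usage sketch (the caller and the btnc name are illustrative, not from this patch; empty_aops is assumed to be an empty struct address_space_operations in scope):

/* Hypothetical caller: wire up a B-tree node cache for @inode. */
static void example_init_btnode_cache(struct inode *inode,
				      struct address_space *btnc,
				      struct backing_dev_info *bdi)
{
	nilfs_mapping_init(btnc, inode, bdi);
}
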
@@ -543,3 +465,87 @@ int __nilfs_clear_page_dirty(struct page *page)
 	}
 	return TestClearPageDirty(page);
 }
+
+/**
+ * nilfs_find_uncommitted_extent - find extent of uncommitted data
+ * @inode: inode
+ * @start_blk: start block offset (in)
+ * @blkoff: start offset of the found extent (out)
+ *
+ * This function searches an extent of buffers marked "delayed" which
+ * starts from a block offset equal to or larger than @start_blk.  If
+ * such an extent was found, this will store the start offset in
+ * @blkoff and return its length in blocks.  Otherwise, zero is
+ * returned.
+ */
+unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
+					    sector_t start_blk,
+					    sector_t *blkoff)
+{
+	unsigned int i;
+	pgoff_t index;
+	unsigned int nblocks_in_page;
+	unsigned long length = 0;
+	sector_t b;
+	struct pagevec pvec;
+	struct page *page;
+
+	if (inode->i_mapping->nrpages == 0)
+		return 0;
+
+	index = start_blk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	nblocks_in_page = 1U << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+
+	pagevec_init(&pvec, 0);
+
+repeat:
+	pvec.nr = find_get_pages_contig(inode->i_mapping, index, PAGEVEC_SIZE,
+					pvec.pages);
+	if (pvec.nr == 0)
+		return length;
+
+	if (length > 0 && pvec.pages[0]->index > index)
+		goto out;
+
+	b = pvec.pages[0]->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	i = 0;
+	do {
+		page = pvec.pages[i];
+
+		lock_page(page);
+		if (page_has_buffers(page)) {
+			struct buffer_head *bh, *head;
+
+			bh = head = page_buffers(page);
+			do {
+				if (b < start_blk)
+					continue;
+				if (buffer_delay(bh)) {
+					if (length == 0)
+						*blkoff = b;
+					length++;
+				} else if (length > 0) {
+					goto out_locked;
+				}
+			} while (++b, bh = bh->b_this_page, bh != head);
+		} else {
+			if (length > 0)
+				goto out_locked;
+
+			b += nblocks_in_page;
+		}
+		unlock_page(page);
+
+	} while (++i < pagevec_count(&pvec));
+
+	index = page->index + 1;
+	pagevec_release(&pvec);
+	cond_resched();
+	goto repeat;
+
+out_locked:
+	unlock_page(page);
+out:
+	pagevec_release(&pvec);
+	return length;
+}
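
As the kernel-doc states, each call reports at most one contiguous run of delayed-allocation blocks at or after @start_blk, so a caller that wants every uncommitted extent iterates, restarting just past the previous hit. A usage sketch (the caller below is illustrative only, not part of the patch):

/* Hypothetical caller: enumerate all uncommitted extents of @inode. */
static void example_walk_uncommitted(struct inode *inode)
{
	sector_t blkoff, start = 0;
	unsigned long nblocks;

	while ((nblocks = nilfs_find_uncommitted_extent(inode, start,
							&blkoff)) > 0) {
		printk(KERN_DEBUG "uncommitted extent: block %llu, %lu blocks\n",
		       (unsigned long long)blkoff, nblocks);
		start = blkoff + nblocks;	/* resume after this extent */
	}
}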