Diffstat (limited to 'fs/buffer.c')
-rw-r--r--	fs/buffer.c	157
1 file changed, 10 insertions, 147 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index 9a73924db22f..cabc045f483d 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -185,10 +185,9 @@ EXPORT_SYMBOL(end_buffer_write_sync);
  * we get exclusion from try_to_free_buffers with the blockdev mapping's
  * private_lock.
  *
- * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
+ * Hack idea: for the blockdev mapping, private_lock contention
  * may be quite high. This code could TryLock the page, and if that
- * succeeds, there is no need to take private_lock. (But if
- * private_lock is contended then so is mapping->tree_lock).
+ * succeeds, there is no need to take private_lock.
  */
 static struct buffer_head *
 __find_get_block_slow(struct block_device *bdev, sector_t block)
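
Note: a rough sketch of the trylock fast path the comment proposes, on the
assumption that holding the page lock alone is enough to exclude
try_to_free_buffers() (bd_mapping/index named as in the surrounding
function; illustration only, not part of this commit):

	struct page *page = find_get_page(bd_mapping, index);
	struct buffer_head *bh = NULL;

	if (page && trylock_page(page)) {
		/* Page lock held: try_to_free_buffers() cannot strip it. */
		if (page_has_buffers(page))
			bh = page_buffers(page);	/* walk the ring for @block */
		unlock_page(page);
	}
	/* On trylock failure, fall back to taking private_lock as today. */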
@@ -495,35 +494,12 @@ repeat:
 	return err;
 }
 
-static void do_thaw_one(struct super_block *sb, void *unused)
+void emergency_thaw_bdev(struct super_block *sb)
 {
 	while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
 		printk(KERN_WARNING "Emergency Thaw on %pg\n", sb->s_bdev);
 }
 
-static void do_thaw_all(struct work_struct *work)
-{
-	iterate_supers(do_thaw_one, NULL);
-	kfree(work);
-	printk(KERN_WARNING "Emergency Thaw complete\n");
-}
-
-/**
- * emergency_thaw_all -- forcibly thaw every frozen filesystem
- *
- * Used for emergency unfreeze of all filesystems via SysRq
- */
-void emergency_thaw_all(void)
-{
-	struct work_struct *work;
-
-	work = kmalloc(sizeof(*work), GFP_ATOMIC);
-	if (work) {
-		INIT_WORK(work, do_thaw_all);
-		schedule_work(work);
-	}
-}
-
 /**
  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
  * @mapping: the mapping which wants those buffers written
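
Note: fs/buffer.c keeps only the per-device helper after this change; the
SysRq-driven "thaw everything" entry point presumably moves next to the
superblock iteration code. A minimal sketch of such a caller (names assumed,
not taken from this diff):

	static void do_thaw_one(struct super_block *sb, void *unused)
	{
		emergency_thaw_bdev(sb);
	}

	void emergency_thaw_all(void)
	{
		iterate_supers(do_thaw_one, NULL);
		printk(KERN_WARNING "Emergency Thaw complete\n");
	}

The real entry point would still need to defer this to a workqueue, as the
removed code did, since iterate_supers() can sleep and SysRq may fire from
atomic context.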
@@ -594,20 +570,21 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  *
  * The caller must hold lock_page_memcg().
  */
-static void __set_page_dirty(struct page *page, struct address_space *mapping,
-			     int warn)
+void __set_page_dirty(struct page *page, struct address_space *mapping,
+		      int warn)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&mapping->tree_lock, flags);
+	xa_lock_irqsave(&mapping->i_pages, flags);
 	if (page->mapping) {	/* Race with truncate? */
 		WARN_ON_ONCE(warn && !PageUptodate(page));
 		account_page_dirtied(page, mapping);
-		radix_tree_tag_set(&mapping->page_tree,
+		radix_tree_tag_set(&mapping->i_pages,
 				page_index(page), PAGECACHE_TAG_DIRTY);
 	}
-	spin_unlock_irqrestore(&mapping->tree_lock, flags);
+	xa_unlock_irqrestore(&mapping->i_pages, flags);
 }
+EXPORT_SYMBOL_GPL(__set_page_dirty);
 
 /*
  * Add a page to the dirty page list.
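
Note: the new EXPORT_SYMBOL_GPL() implies a modular in-tree caller now uses
__set_page_dirty() directly. A hedged sketch of what such a filesystem's
->set_page_dirty() method might look like (the example_* name is
hypothetical, not from this diff):

	static int example_set_page_dirty(struct page *page)
	{
		struct address_space *mapping = page->mapping;

		lock_page_memcg(page);	/* __set_page_dirty() requires this */
		if (TestSetPageDirty(page)) {
			unlock_page_memcg(page);
			return 0;	/* was already dirty */
		}
		__set_page_dirty(page, mapping, 1);
		unlock_page_memcg(page);

		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
		return 1;
	}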
@@ -1095,7 +1072,7 @@ __getblk_slow(struct block_device *bdev, sector_t block,
  * inode list.
  *
  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
- * mapping->tree_lock and mapping->host->i_lock.
+ * i_pages lock and mapping->host->i_lock.
  */
 void mark_buffer_dirty(struct buffer_head *bh)
 {
@@ -1511,7 +1488,7 @@ void block_invalidatepage(struct page *page, unsigned int offset,
 	 * The get_block cached value has been unconditionally invalidated,
 	 * so real IO is not possible anymore.
 	 */
-	if (offset == 0)
+	if (length == PAGE_SIZE)
 		try_to_release_page(page, 0);
 out:
 	return;
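
Note: offset == 0 matches any invalidation that starts at the head of the
page, including a partial one; because block_invalidatepage() checks that
offset + length never exceeds PAGE_SIZE, length == PAGE_SIZE can only be
true when the entire page is being invalidated, which is the case the
buffer release is meant for. Illustration:

	block_invalidatepage(page, 0, PAGE_SIZE / 2);	/* partial: buffers kept */
	block_invalidatepage(page, 0, PAGE_SIZE);	/* full: may release them */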
@@ -3450,120 +3427,6 @@ int bh_submit_read(struct buffer_head *bh)
 }
 EXPORT_SYMBOL(bh_submit_read);
 
-/*
- * Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff.
- *
- * Returns the offset within the file on success, and -ENOENT otherwise.
- */
-static loff_t
-page_seek_hole_data(struct page *page, loff_t lastoff, int whence)
-{
-	loff_t offset = page_offset(page);
-	struct buffer_head *bh, *head;
-	bool seek_data = whence == SEEK_DATA;
-
-	if (lastoff < offset)
-		lastoff = offset;
-
-	bh = head = page_buffers(page);
-	do {
-		offset += bh->b_size;
-		if (lastoff >= offset)
-			continue;
-
-		/*
-		 * Unwritten extents that have data in the page cache covering
-		 * them can be identified by the BH_Unwritten state flag.
-		 * Pages with multiple buffers might have a mix of holes, data
-		 * and unwritten extents - any buffer with valid data in it
-		 * should have BH_Uptodate flag set on it.
-		 */
-
-		if ((buffer_unwritten(bh) || buffer_uptodate(bh)) == seek_data)
-			return lastoff;
-
-		lastoff = offset;
-	} while ((bh = bh->b_this_page) != head);
-	return -ENOENT;
-}
-
-/*
- * Seek for SEEK_DATA / SEEK_HOLE in the page cache.
- *
- * Within unwritten extents, the page cache determines which parts are holes
- * and which are data: unwritten and uptodate buffer heads count as data;
- * everything else counts as a hole.
- *
- * Returns the resulting offset on successs, and -ENOENT otherwise.
- */
-loff_t
-page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
-			  int whence)
-{
-	pgoff_t index = offset >> PAGE_SHIFT;
-	pgoff_t end = DIV_ROUND_UP(offset + length, PAGE_SIZE);
-	loff_t lastoff = offset;
-	struct pagevec pvec;
-
-	if (length <= 0)
-		return -ENOENT;
-
-	pagevec_init(&pvec);
-
-	do {
-		unsigned nr_pages, i;
-
-		nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index,
-						end - 1);
-		if (nr_pages == 0)
-			break;
-
-		for (i = 0; i < nr_pages; i++) {
-			struct page *page = pvec.pages[i];
-
-			/*
-			 * At this point, the page may be truncated or
-			 * invalidated (changing page->mapping to NULL), or
-			 * even swizzled back from swapper_space to tmpfs file
-			 * mapping.  However, page->index will not change
-			 * because we have a reference on the page.
-			 *
-			 * If current page offset is beyond where we've ended,
-			 * we've found a hole.
-			 */
-			if (whence == SEEK_HOLE &&
-			    lastoff < page_offset(page))
-				goto check_range;
-
-			lock_page(page);
-			if (likely(page->mapping == inode->i_mapping) &&
-			    page_has_buffers(page)) {
-				lastoff = page_seek_hole_data(page, lastoff, whence);
-				if (lastoff >= 0) {
-					unlock_page(page);
-					goto check_range;
-				}
-			}
-			unlock_page(page);
-			lastoff = page_offset(page) + PAGE_SIZE;
-		}
-		pagevec_release(&pvec);
-	} while (index < end);
-
-	/* When no page at lastoff and we are not done, we found a hole. */
-	if (whence != SEEK_HOLE)
-		goto not_found;
-
-check_range:
-	if (lastoff < offset + length)
-		goto out;
-not_found:
-	lastoff = -ENOENT;
-out:
-	pagevec_release(&pvec);
-	return lastoff;
-}
-
 void __init buffer_init(void)
 {
 	unsigned long nrpages;
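
Note: the SEEK_HOLE/SEEK_DATA helpers are removed from buffer.c here,
presumably because their remaining callers switched to the iomap seek
helpers; the userspace-visible behaviour is unchanged. A quick hypothetical
userspace probe of the interface this code used to back:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("sparse.img", O_RDONLY);	/* hypothetical file */
		if (fd < 0)
			return 1;

		off_t data = lseek(fd, 0, SEEK_DATA);	/* first data byte, or -1 */
		off_t hole = lseek(fd, data < 0 ? 0 : data, SEEK_HOLE);

		printf("data at %lld, hole at %lld\n",
		       (long long)data, (long long)hole);
		close(fd);
		return 0;
	}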