Diffstat (limited to 'fs/buffer.c')
-rw-r--r--	fs/buffer.c	55
1 file changed, 52 insertions(+), 3 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index d2a4d1bb2d57..4d7433534f5c 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -83,6 +83,40 @@ void unlock_buffer(struct buffer_head *bh)
 EXPORT_SYMBOL(unlock_buffer);
 
 /*
+ * Returns if the page has dirty or writeback buffers. If all the buffers
+ * are unlocked and clean then the PageDirty information is stale. If
+ * any of the pages are locked, it is assumed they are locked for IO.
+ */
+void buffer_check_dirty_writeback(struct page *page,
+				     bool *dirty, bool *writeback)
+{
+	struct buffer_head *head, *bh;
+	*dirty = false;
+	*writeback = false;
+
+	BUG_ON(!PageLocked(page));
+
+	if (!page_has_buffers(page))
+		return;
+
+	if (PageWriteback(page))
+		*writeback = true;
+
+	head = page_buffers(page);
+	bh = head;
+	do {
+		if (buffer_locked(bh))
+			*writeback = true;
+
+		if (buffer_dirty(bh))
+			*dirty = true;
+
+		bh = bh->b_this_page;
+	} while (bh != head);
+}
+EXPORT_SYMBOL(buffer_check_dirty_writeback);
+
+/*
  * Block until a buffer comes unlocked. This doesn't stop it
  * from becoming locked again - you have to lock it yourself
  * if you want to preserve its state.
@@ -1454,7 +1488,8 @@ static void discard_buffer(struct buffer_head * bh)
  * block_invalidatepage - invalidate part or all of a buffer-backed page
  *
  * @page: the page which is affected
- * @offset: the index of the truncation point
+ * @offset: start of the range to invalidate
+ * @length: length of the range to invalidate
  *
  * block_invalidatepage() is called when all or part of the page has become
  * invalidated by a truncate operation.
@@ -1465,15 +1500,22 @@ static void discard_buffer(struct buffer_head * bh)
  * point. Because the caller is about to free (and possibly reuse) those
  * blocks on-disk.
  */
-void block_invalidatepage(struct page *page, unsigned long offset)
+void block_invalidatepage(struct page *page, unsigned int offset,
+			  unsigned int length)
 {
 	struct buffer_head *head, *bh, *next;
 	unsigned int curr_off = 0;
+	unsigned int stop = length + offset;
 
 	BUG_ON(!PageLocked(page));
 	if (!page_has_buffers(page))
 		goto out;
 
+	/*
+	 * Check for overflow
+	 */
+	BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
+
 	head = page_buffers(page);
 	bh = head;
 	do {
@@ -1481,6 +1523,12 @@ void block_invalidatepage(struct page *page, unsigned long offset)
 		next = bh->b_this_page;
 
 		/*
+		 * Are we still fully in range ?
+		 */
+		if (next_off > stop)
+			goto out;
+
+		/*
 		 * is this block fully invalidated?
 		 */
 		if (offset <= curr_off)
@@ -1501,6 +1549,7 @@ out:
 }
 EXPORT_SYMBOL(block_invalidatepage);
 
+
 /*
  * We attach and possibly dirty the buffers atomically wrt
  * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
@@ -2841,7 +2890,7 @@ int block_write_full_page_endio(struct page *page, get_block_t *get_block,
 	 * they may have been added in ext3_writepage(). Make them
 	 * freeable here, so the page does not leak.
 	 */
-	do_invalidatepage(page, 0);
+	do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
 	unlock_page(page);
 	return 0; /* don't care */
 }
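
For orientation, below is a minimal sketch of how a buffer_head-backed filesystem might call the two helpers touched by this diff. The wrapper names (example_invalidate_range, example_page_is_quiescent) and their surrounding setup are hypothetical and not part of this patch; only buffer_check_dirty_writeback() and block_invalidatepage() come from the code above.

#include <linux/buffer_head.h>
#include <linux/pagemap.h>

/*
 * Hypothetical caller: drop the buffers covering the byte range
 * [offset, offset + length) of a locked, buffer-backed page, e.g. after
 * a partial truncate. The range must stay within the page;
 * block_invalidatepage() BUG()s if offset + length exceeds PAGE_CACHE_SIZE.
 */
static void example_invalidate_range(struct page *page, unsigned int offset,
				     unsigned int length)
{
	block_invalidatepage(page, offset, length);
}

/*
 * Hypothetical caller: check whether a locked page still has dirty or
 * in-flight (locked/writeback) buffers before trusting PageDirty alone.
 */
static bool example_page_is_quiescent(struct page *page)
{
	bool dirty, writeback;

	buffer_check_dirty_writeback(page, &dirty, &writeback);
	return !dirty && !writeback;
}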