author     Thomas Gleixner <tglx@mtd.linutronix.de>  2005-11-06 09:36:37 -0500
committer  Thomas Gleixner <tglx@mtd.linutronix.de>  2005-11-06 09:36:37 -0500
commit     2fc2991175bf77395e6b15fe6b2304d3bf72da40 (patch)
tree       b0ff38c09240e7c00e1577d447ebe89143d752dc /fs/buffer.c
parent     8b491d750885ebe8e7d385ce4186c85957d67123 (diff)
parent     7015faa7df829876a0f931cd18aa6d7c24a1b581 (diff)

Merge branch 'master' of /home/tglx/work/mtd/git/linux-2.6.git/
Diffstat (limited to 'fs/buffer.c'):
 -rw-r--r--  fs/buffer.c | 68
 1 file changed, 40 insertions(+), 28 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 6a25d7df89b1..35fa34977e81 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -40,6 +40,7 @@
 #include <linux/cpu.h>
 #include <linux/bitops.h>
 #include <linux/mpage.h>
+#include <linux/bit_spinlock.h>
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
 static void invalidate_bh_lrus(void);
@@ -95,7 +96,7 @@ static void
 __clear_page_buffers(struct page *page)
 {
 	ClearPagePrivate(page);
-	page->private = 0;
+	set_page_private(page, 0);
 	page_cache_release(page);
 }
 
@@ -501,7 +502,7 @@ static void free_more_memory(void)
 	yield();
 
 	for_each_pgdat(pgdat) {
-		zones = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones;
+		zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
 		if (*zones)
 			try_to_free_pages(zones, GFP_NOFS);
 	}
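gfp_zone() encapsulates the GFP_ZONEMASK masking that this line used to open-code, turning allocation flags into a zonelist index. Roughly (a sketch of the era's definition, omitting any debug checks):

static inline int gfp_zone(gfp_t gfp)
{
	return (__force int)(gfp & GFP_ZONEMASK);
}
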
@@ -917,8 +918,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 			 * contents - it is a noop if I/O is still in
 			 * flight on potentially older contents.
 			 */
-			wait_on_buffer(bh);
-			ll_rw_block(WRITE, 1, &bh);
+			ll_rw_block(SWRITE, 1, &bh);
 			brelse(bh);
 			spin_lock(lock);
 		}
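The point of SWRITE here: ll_rw_block(WRITE, ...) only trylocks each buffer and skips any it cannot lock, so the old wait-then-write pair could still miss a buffer that was re-locked in between. Side by side (illustration only, both lines taken from this hunk):

	/* old: wait for in-flight I/O, then try to write - the trylock in
	 * ll_rw_block() can still skip the buffer if it got re-locked */
	wait_on_buffer(bh);
	ll_rw_block(WRITE, 1, &bh);

	/* new: SWRITE makes ll_rw_block() sleep in lock_buffer(), so the
	 * *current* contents are guaranteed to be queued for disk */
	ll_rw_block(SWRITE, 1, &bh);
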
@@ -1478,8 +1478,10 @@ EXPORT_SYMBOL(__getblk);
 void __breadahead(struct block_device *bdev, sector_t block, int size)
 {
 	struct buffer_head *bh = __getblk(bdev, block, size);
-	ll_rw_block(READA, 1, &bh);
-	brelse(bh);
+	if (likely(bh)) {
+		ll_rw_block(READA, 1, &bh);
+		brelse(bh);
+	}
 }
 EXPORT_SYMBOL(__breadahead);
 
@@ -1497,7 +1499,7 @@ __bread(struct block_device *bdev, sector_t block, int size)
 {
 	struct buffer_head *bh = __getblk(bdev, block, size);
 
-	if (!buffer_uptodate(bh))
+	if (likely(bh) && !buffer_uptodate(bh))
 		bh = __bread_slow(bh);
 	return bh;
 }
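With the likely(bh) checks above, __breadahead() and __bread() now tolerate a NULL return from __getblk(), but __bread() may itself return NULL, so callers still need their own check. A hypothetical caller sketch (read_one_block() is made up for illustration):

static int read_one_block(struct block_device *bdev, sector_t block, int size)
{
	struct buffer_head *bh = __bread(bdev, block, size);

	if (!bh)
		return -EIO;	/* allocation or read failed */
	/* ... use bh->b_data ... */
	brelse(bh);
	return 0;
}
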
@@ -1571,7 +1573,7 @@ static inline void discard_buffer(struct buffer_head * bh)
  *
  * NOTE: @gfp_mask may go away, and this function may become non-blocking.
  */
-int try_to_release_page(struct page *page, int gfp_mask)
+int try_to_release_page(struct page *page, gfp_t gfp_mask)
 {
 	struct address_space * const mapping = page->mapping;
 
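gfp_t is the then-new annotated type for allocation flags, part of a tree-wide conversion away from plain int so that sparse can catch flag/number mixups. Approximately (a sketch of the <linux/types.h> definition):

#ifdef __CHECKER__
#define __bitwise__	__attribute__((bitwise))
#else
#define __bitwise__
#endif

typedef unsigned int __bitwise__ gfp_t;
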
@@ -1637,6 +1639,15 @@ out:
 }
 EXPORT_SYMBOL(block_invalidatepage);
 
+int do_invalidatepage(struct page *page, unsigned long offset)
+{
+	int (*invalidatepage)(struct page *, unsigned long);
+	invalidatepage = page->mapping->a_ops->invalidatepage;
+	if (invalidatepage == NULL)
+		invalidatepage = block_invalidatepage;
+	return (*invalidatepage)(page, offset);
+}
+
 /*
  * We attach and possibly dirty the buffers atomically wrt
  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
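do_invalidatepage() centralizes the address_space_operations dispatch that call sites previously had to open-code, falling back to block_invalidatepage() when a filesystem supplies no ->invalidatepage. The equivalent open-coded call site it replaces would be:

	if (page->mapping->a_ops->invalidatepage)
		page->mapping->a_ops->invalidatepage(page, offset);
	else
		block_invalidatepage(page, offset);
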
@@ -2696,7 +2707,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 		 * they may have been added in ext3_writepage().  Make them
 		 * freeable here, so the page does not leak.
 		 */
-		block_invalidatepage(page, 0);
+		do_invalidatepage(page, 0);
 		unlock_page(page);
 		return 0; /* don't care */
 	}
@@ -2793,21 +2804,22 @@ int submit_bh(int rw, struct buffer_head * bh)
 
 /**
  * ll_rw_block: low-level access to block devices (DEPRECATED)
- * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
+ * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
  * @nr: number of &struct buffer_heads in the array
  * @bhs: array of pointers to &struct buffer_head
  *
- * ll_rw_block() takes an array of pointers to &struct buffer_heads,
- * and requests an I/O operation on them, either a %READ or a %WRITE.
- * The third %READA option is described in the documentation for
- * generic_make_request() which ll_rw_block() calls.
+ * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
+ * requests an I/O operation on them, either a %READ or a %WRITE.  The third
+ * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
+ * are sent to disk. The fourth %READA option is described in the documentation
+ * for generic_make_request() which ll_rw_block() calls.
  *
  * This function drops any buffer that it cannot get a lock on (with the
- * BH_Lock state bit), any buffer that appears to be clean when doing a
- * write request, and any buffer that appears to be up-to-date when doing
- * read request.  Further it marks as clean buffers that are processed for
- * writing (the buffer cache won't assume that they are actually clean until
- * the buffer gets unlocked).
+ * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
+ * clean when doing a write request, and any buffer that appears to be
+ * up-to-date when doing read request.  Further it marks as clean buffers that
+ * are processed for writing (the buffer cache won't assume that they are
+ * actually clean until the buffer gets unlocked).
  *
  * ll_rw_block sets b_end_io to simple completion handler that marks
  * the buffer up-to-date (if approriate), unlocks the buffer and wakes
@@ -2823,11 +2835,13 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 	for (i = 0; i < nr; i++) {
 		struct buffer_head *bh = bhs[i];
 
-		if (test_set_buffer_locked(bh))
+		if (rw == SWRITE)
+			lock_buffer(bh);
+		else if (test_set_buffer_locked(bh))
 			continue;
 
 		get_bh(bh);
-		if (rw == WRITE) {
+		if (rw == WRITE || rw == SWRITE) {
 			if (test_clear_buffer_dirty(bh)) {
 				bh->b_end_io = end_buffer_write_sync;
 				submit_bh(WRITE, bh);
@@ -3042,14 +3056,13 @@ static void recalc_bh_state(void)
 	buffer_heads_over_limit = (tot > max_buffer_heads);
 }
 
-struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags)
+struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
 	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
 	if (ret) {
-		preempt_disable();
-		__get_cpu_var(bh_accounting).nr++;
+		get_cpu_var(bh_accounting).nr++;
 		recalc_bh_state();
-		preempt_enable();
+		put_cpu_var(bh_accounting);
 	}
 	return ret;
 }
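get_cpu_var()/put_cpu_var() bundle the preempt_disable()/__get_cpu_var()/preempt_enable() sequence the old code spelled out, keeping the preemption-off window paired with the per-CPU access. Their definitions at the time were along the lines of:

#define get_cpu_var(var)	(*({ preempt_disable(); &__get_cpu_var(var); }))
#define put_cpu_var(var)	preempt_enable()
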
@@ -3059,10 +3072,9 @@ void free_buffer_head(struct buffer_head *bh)
 {
 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
 	kmem_cache_free(bh_cachep, bh);
-	preempt_disable();
-	__get_cpu_var(bh_accounting).nr--;
+	get_cpu_var(bh_accounting).nr--;
 	recalc_bh_state();
-	preempt_enable();
+	put_cpu_var(bh_accounting);
 }
 EXPORT_SYMBOL(free_buffer_head);
 